/linux-4.1.27/drivers/gpu/drm/i915/

D | intel_ringbuffer.c
    37  intel_ring_initialized(struct intel_engine_cs *ring)  in intel_ring_initialized() argument
    39  struct drm_device *dev = ring->dev;  in intel_ring_initialized()
    45  struct intel_context *dctx = ring->default_context;  in intel_ring_initialized()
    46  struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;  in intel_ring_initialized()
    50  return ring->buffer && ring->buffer->obj;  in intel_ring_initialized()
    78  bool intel_ring_stopped(struct intel_engine_cs *ring)  in intel_ring_stopped() argument
    80  struct drm_i915_private *dev_priv = ring->dev->dev_private;  in intel_ring_stopped()
    81  return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);  in intel_ring_stopped()
    84  void __intel_ring_advance(struct intel_engine_cs *ring)  in __intel_ring_advance() argument
    86  struct intel_ringbuffer *ringbuf = ring->buffer;  in __intel_ring_advance()
    [all …]

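Several of the hits above revolve around advancing a ring tail and wrapping it. A minimal user-space model of that arithmetic, assuming a power-of-two ring measured in dwords (sizes and names here are illustrative, not i915's):

```c
#include <stdint.h>
#include <stdio.h>

struct ringbuf {
    uint32_t buf[256];   /* ring storage, power-of-two number of dwords */
    uint32_t tail;       /* next write index, in dwords */
    uint32_t size;       /* total dwords */
};

/* Emit one dword and wrap the tail, in the style of __intel_ring_advance(). */
static void ring_emit(struct ringbuf *rb, uint32_t dw)
{
    rb->buf[rb->tail] = dw;
    rb->tail = (rb->tail + 1) & (rb->size - 1); /* power-of-two wrap */
}

int main(void)
{
    struct ringbuf rb = { .size = 256 };
    for (int i = 0; i < 300; i++)
        ring_emit(&rb, i);
    printf("tail after 300 emits: %u\n", rb.tail); /* 300 & 255 == 44 */
    return 0;
}
```
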
D | intel_lrc.c
    206  static int intel_lr_context_pin(struct intel_engine_cs *ring,
    257  static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,  in execlists_ctx_descriptor() argument
    260  struct drm_device *dev = ring->dev;  in execlists_ctx_descriptor()
    280  (ring->id == BCS || ring->id == VCS ||  in execlists_ctx_descriptor()
    281  ring->id == VECS || ring->id == VCS2))  in execlists_ctx_descriptor()
    287  static void execlists_elsp_write(struct intel_engine_cs *ring,  in execlists_elsp_write() argument
    291  struct drm_device *dev = ring->dev;  in execlists_elsp_write()
    298  temp = execlists_ctx_descriptor(ring, ctx_obj1);  in execlists_elsp_write()
    304  temp = execlists_ctx_descriptor(ring, ctx_obj0);  in execlists_elsp_write()
    309  I915_WRITE(RING_ELSP(ring), desc[1]);  in execlists_elsp_write()
    [all …]

D | intel_ringbuffer.h
    32  #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))  argument
    33  #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)  argument
    35  #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))  argument
    36  #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)  argument
    38  #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))  argument
    39  #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)  argument
    41  #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))  argument
    42  #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)  argument
    44  #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))  argument
    45  #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)  argument
    [all …]

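The header above (and intel_lrc.h further down) shows the same macro pattern throughout: a per-engine register is just the engine's mmio_base plus a fixed offset. A standalone model, with fake_mmio[] standing in for the register BAR; the 0x30 offset is chosen for illustration, not taken from the hardware documentation:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[0x10000];          /* pretend register window */
#define REG_READ(reg)        (fake_mmio[(reg) >> 2])
#define REG_WRITE(reg, val)  (fake_mmio[(reg) >> 2] = (val))

struct engine { uint32_t mmio_base; };

#define RING_TAIL(base)            ((base) + 0x30)  /* assumed offset */
#define I915_READ_TAIL(ring)       REG_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) REG_WRITE(RING_TAIL((ring)->mmio_base), (val))

int main(void)
{
    struct engine rcs = { .mmio_base = 0x2000 }, bcs = { .mmio_base = 0x22000 };
    I915_WRITE_TAIL(&rcs, 0x40);                  /* same macro, ... */
    I915_WRITE_TAIL(&bcs, 0x80);                  /* ...different engine */
    printf("RCS tail %#x, BCS tail %#x\n",
           I915_READ_TAIL(&rcs), I915_READ_TAIL(&bcs));
    return 0;
}
```
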
D | i915_gem_context.c
    310  struct intel_engine_cs *ring = &dev_priv->ring[i];  in i915_gem_context_reset() local
    311  struct intel_context *lctx = ring->last_context;  in i915_gem_context_reset()
    318  ring->last_context = NULL;  in i915_gem_context_reset()
    322  if (ring->default_context)  in i915_gem_context_reset()
    323  ring->default_context->legacy_hw_ctx.initialized = false;  in i915_gem_context_reset()
    335  if (WARN_ON(dev_priv->ring[RCS].default_context))  in i915_gem_context_init()
    359  struct intel_engine_cs *ring = &dev_priv->ring[i];  in i915_gem_context_init() local
    362  ring->default_context = ctx;  in i915_gem_context_init()
    374  struct intel_context *dctx = dev_priv->ring[RCS].default_context;  in i915_gem_context_fini()
    389  WARN_ON(!dev_priv->ring[RCS].last_context);  in i915_gem_context_fini()
    [all …]

D | i915_gpu_error.c
    38  static const char *ring_str(int ring)  in ring_str() argument
    40  switch (ring) {  in ring_str()
    209  err_puts(m, err->ring != -1 ? " " : "");  in print_error_buffers()
    210  err_puts(m, ring_str(err->ring));  in print_error_buffers()
    248  struct drm_i915_error_ring *ring = &error->ring[ring_idx];  in i915_ring_error_state() local
    250  if (!ring->valid)  in i915_ring_error_state()
    254  err_printf(m, "  HEAD: 0x%08x\n", ring->head);  in i915_ring_error_state()
    255  err_printf(m, "  TAIL: 0x%08x\n", ring->tail);  in i915_ring_error_state()
    256  err_printf(m, "  CTL: 0x%08x\n", ring->ctl);  in i915_ring_error_state()
    257  err_printf(m, "  HWS: 0x%08x\n", ring->hws);  in i915_ring_error_state()
    [all …]

D | intel_lrc.h
    30  #define RING_ELSP(ring) ((ring)->mmio_base+0x230)  argument
    31  #define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)  argument
    32  #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)  argument
    35  #define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)  argument
    36  #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)  argument
    39  void intel_logical_ring_stop(struct intel_engine_cs *ring);
    40  void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
    73  struct intel_engine_cs *ring);
    74  void intel_lr_context_unpin(struct intel_engine_cs *ring,
    82  struct intel_engine_cs *ring,
    [all …]

D | i915_cmd_parser.c
    520  static bool validate_cmds_sorted(struct intel_engine_cs *ring,  in validate_cmds_sorted() argument
    542  ring->id, i, j, curr, previous);  in validate_cmds_sorted()
    574  static bool validate_regs_sorted(struct intel_engine_cs *ring)  in validate_regs_sorted() argument
    576  return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&  in validate_regs_sorted()
    577  check_sorted(ring->id, ring->master_reg_table,  in validate_regs_sorted()
    578  ring->master_reg_count);  in validate_regs_sorted()
    602  static int init_hash_table(struct intel_engine_cs *ring,  in init_hash_table() argument
    608  hash_init(ring->cmd_hash);  in init_hash_table()
    623  hash_add(ring->cmd_hash, &desc_node->node,  in init_hash_table()
    631  static void fini_hash_table(struct intel_engine_cs *ring)  in fini_hash_table() argument
    [all …]

D | i915_gem_execbuffer.c
    586  struct intel_engine_cs *ring,  in i915_gem_execbuffer_reserve_vma() argument
    683  i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,  in i915_gem_execbuffer_reserve() argument
    691  bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;  in i915_gem_execbuffer_reserve()
    694  i915_gem_retire_requests_ring(ring);  in i915_gem_execbuffer_reserve()
    749  ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);  in i915_gem_execbuffer_reserve()
    759  ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);  in i915_gem_execbuffer_reserve()
    782  struct intel_engine_cs *ring,  in i915_gem_execbuffer_relocate_slow() argument
    870  ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);  in i915_gem_execbuffer_relocate_slow()
    895  i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,  in i915_gem_execbuffer_move_to_gpu() argument
    905  ret = i915_gem_object_sync(obj, ring);  in i915_gem_execbuffer_move_to_gpu()
    [all …]

D | i915_trace.h
    458  __field(u32, ring)
    464  struct intel_engine_cs *ring =
    466  __entry->dev = ring->dev->primary->index;
    467  __entry->ring = ring->id;
    470  i915_trace_irq_get(ring, req);
    474  __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
    478  TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
    479  TP_ARGS(ring, invalidate, flush),
    483  __field(u32, ring)
    489  __entry->dev = ring->dev->primary->index;
    [all …]

D | i915_gem_render_state.c
    131  int i915_gem_render_state_prepare(struct intel_engine_cs *ring,  in i915_gem_render_state_prepare() argument
    136  if (WARN_ON(ring->id != RCS))  in i915_gem_render_state_prepare()
    139  ret = render_state_init(so, ring->dev);  in i915_gem_render_state_prepare()
    155  int i915_gem_render_state_init(struct intel_engine_cs *ring)  in i915_gem_render_state_init() argument
    160  ret = i915_gem_render_state_prepare(ring, &so);  in i915_gem_render_state_init()
    167  ret = ring->dispatch_execbuffer(ring,  in i915_gem_render_state_init()
    174  i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);  in i915_gem_render_state_init()
    176  ret = __i915_add_request(ring, NULL, so.obj);  in i915_gem_render_state_init()

D | i915_gem.c
    1164  WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));  in i915_gem_check_olr()
    1167  if (req == req->ring->outstanding_lazy_request)  in i915_gem_check_olr()
    1168  ret = i915_add_request(req->ring);  in i915_gem_check_olr()
    1179  struct intel_engine_cs *ring)  in missed_irq() argument
    1181  return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);  in missed_irq()
    1215  struct intel_engine_cs *ring = i915_gem_request_get_ring(req);  in __i915_wait_request() local
    1216  struct drm_device *dev = ring->dev;  in __i915_wait_request()
    1219  ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);  in __i915_wait_request()
    1233  if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {  in __i915_wait_request()
    1241  if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))  in __i915_wait_request()
    [all …]

D | i915_irq.c
    989  struct intel_engine_cs *ring)  in notify_ring() argument
    991  if (!intel_ring_initialized(ring))  in notify_ring()
    994  trace_i915_gem_request_notify(ring);  in notify_ring()
    996  wake_up_all(&ring->irq_queue);  in notify_ring()
    1254  notify_ring(dev, &dev_priv->ring[RCS]);  in ilk_gt_irq_handler()
    1256  notify_ring(dev, &dev_priv->ring[VCS]);  in ilk_gt_irq_handler()
    1266  notify_ring(dev, &dev_priv->ring[RCS]);  in snb_gt_irq_handler()
    1268  notify_ring(dev, &dev_priv->ring[VCS]);  in snb_gt_irq_handler()
    1270  notify_ring(dev, &dev_priv->ring[BCS]);  in snb_gt_irq_handler()
    1285  struct intel_engine_cs *ring;  in gen8_gt_irq_handler() local
    [all …]

D | intel_overlay.c
    216  struct intel_engine_cs *ring = &dev_priv->ring[RCS];  in intel_overlay_do_wait_request() local
    221  ring->outstanding_lazy_request);  in intel_overlay_do_wait_request()
    222  ret = i915_add_request(ring);  in intel_overlay_do_wait_request()
    241  struct intel_engine_cs *ring = &dev_priv->ring[RCS];  in intel_overlay_on() local
    249  ret = intel_ring_begin(ring, 4);  in intel_overlay_on()
    253  intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);  in intel_overlay_on()
    254  intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);  in intel_overlay_on()
    255  intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);  in intel_overlay_on()
    256  intel_ring_emit(ring, MI_NOOP);  in intel_overlay_on()
    257  intel_ring_advance(ring);  in intel_overlay_on()
    [all …]

D | i915_gem_gtt.c
    442  static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,  in gen8_write_pdp() argument
    449  ret = intel_ring_begin(ring, 6);  in gen8_write_pdp()
    453  intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));  in gen8_write_pdp()
    454  intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));  in gen8_write_pdp()
    455  intel_ring_emit(ring, (u32)(val >> 32));  in gen8_write_pdp()
    456  intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));  in gen8_write_pdp()
    457  intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));  in gen8_write_pdp()
    458  intel_ring_emit(ring, (u32)(val));  in gen8_write_pdp()
    459  intel_ring_advance(ring);  in gen8_write_pdp()
    465  struct intel_engine_cs *ring)  in gen8_mm_switch() argument
    [all …]

D | i915_debugfs.c
    570  struct intel_engine_cs *ring =  in i915_gem_pageflip_info() local
    574  ring->name,  in i915_gem_pageflip_info()
    577  ring->get_seqno(ring, true),  in i915_gem_pageflip_info()
    645  struct intel_engine_cs *ring;  in i915_gem_request_info() local
    654  for_each_ring(ring, dev_priv, i) {  in i915_gem_request_info()
    655  if (list_empty(&ring->request_list))  in i915_gem_request_info()
    658  seq_printf(m, "%s requests:\n", ring->name);  in i915_gem_request_info()
    660  &ring->request_list,  in i915_gem_request_info()
    677  struct intel_engine_cs *ring)  in i915_ring_seqno_info() argument
    679  if (ring->get_seqno) {  in i915_ring_seqno_info()
    [all …]

D | intel_frontbuffer.c
    108  struct intel_engine_cs *ring)  in intel_mark_fb_busy() argument
    134  struct intel_engine_cs *ring,  in intel_fb_obj_invalidate() argument
    145  if (ring) {  in intel_fb_obj_invalidate()
    154  intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);  in intel_fb_obj_invalidate()

D | i915_gem_render_state.h
    42  int i915_gem_render_state_init(struct intel_engine_cs *ring);
    44  int i915_gem_render_state_prepare(struct intel_engine_cs *ring,

/linux-4.1.27/drivers/thunderbolt/

D | nhi.c
    22  #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")  argument
    25  static int ring_interrupt_index(struct tb_ring *ring)  in ring_interrupt_index() argument
    27  int bit = ring->hop;  in ring_interrupt_index()
    28  if (!ring->is_tx)  in ring_interrupt_index()
    29  bit += ring->nhi->hop_count;  in ring_interrupt_index()
    38  static void ring_interrupt_active(struct tb_ring *ring, bool active)  in ring_interrupt_active() argument
    40  int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;  in ring_interrupt_active()
    41  int bit = ring_interrupt_index(ring) & 31;  in ring_interrupt_active()
    44  old = ioread32(ring->nhi->iobase + reg);  in ring_interrupt_active()
    50  dev_info(&ring->nhi->pdev->dev,  in ring_interrupt_active()
    [all …]

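The index arithmetic in ring_interrupt_index()/ring_interrupt_active() above is easy to model: TX ring N owns interrupt bit N, RX ring N owns bit N + hop_count, and the bits are spread across consecutive 32-bit words. A sketch; the hop_count and hop number below are made up:

```c
#include <stdio.h>

/* TX ring N uses bit N; RX ring N uses bit N + hop_count. */
static int ring_interrupt_index(int hop, int is_tx, int hop_count)
{
    return is_tx ? hop : hop + hop_count;
}

int main(void)
{
    int hop_count = 12;                           /* assumed NHI hop count */
    int idx  = ring_interrupt_index(5, 0, hop_count);  /* RX ring 5 */
    int word = idx / 32;                          /* which 32-bit interrupt reg */
    int bit  = idx & 31;                          /* which bit inside it */
    printf("RX ring 5 -> index %d (word %d, bit %d)\n", idx, word, bit);
    return 0;
}
```
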
D | nhi.h
    69  void ring_start(struct tb_ring *ring);
    70  void ring_stop(struct tb_ring *ring);
    71  void ring_free(struct tb_ring *ring);
    73  int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
    89  static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)  in ring_rx() argument
    91  WARN_ON(ring->is_tx);  in ring_rx()
    92  return __ring_enqueue(ring, frame);  in ring_rx()
    108  static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)  in ring_tx() argument
    110  WARN_ON(!ring->is_tx);  in ring_tx()
    111  return __ring_enqueue(ring, frame);  in ring_tx()

/linux-4.1.27/drivers/gpu/drm/radeon/

D | radeon_ring.c
    45  static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
    58  struct radeon_ring *ring)  in radeon_ring_supports_scratch_reg() argument
    60  switch (ring->idx) {  in radeon_ring_supports_scratch_reg()
    78  void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)  in radeon_ring_free_size() argument
    80  uint32_t rptr = radeon_ring_get_rptr(rdev, ring);  in radeon_ring_free_size()
    83  ring->ring_free_dw = rptr + (ring->ring_size / 4);  in radeon_ring_free_size()
    84  ring->ring_free_dw -= ring->wptr;  in radeon_ring_free_size()
    85  ring->ring_free_dw &= ring->ptr_mask;  in radeon_ring_free_size()
    86  if (!ring->ring_free_dw) {  in radeon_ring_free_size()
    88  ring->ring_free_dw = ring->ring_size / 4;  in radeon_ring_free_size()
    [all …]

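radeon_ring_free_size() above is a classic rptr/wptr free-space computation. A sketch of just that arithmetic, assuming ptr_mask == ring_size/4 - 1 as the masking in the snippet implies:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr,
                             uint32_t ring_size_bytes)
{
    uint32_t size_dw  = ring_size_bytes / 4;  /* ring size in dwords */
    uint32_t ptr_mask = size_dw - 1;          /* size must be a power of two */
    uint32_t free_dw  = (rptr + size_dw - wptr) & ptr_mask;

    /* rptr == wptr is ambiguous; the driver treats 0 as "completely free" */
    return free_dw ? free_dw : size_dw;
}

int main(void)
{
    printf("%u\n", ring_free_dw(100, 40, 4096)); /* writer may emit 60 dwords */
    printf("%u\n", ring_free_dw(40, 40, 4096));  /* empty ring: 1024 */
    return 0;
}
```
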
D | evergreen_dma.c
    44  struct radeon_ring *ring = &rdev->ring[fence->ring];  in evergreen_dma_fence_ring_emit() local
    45  u64 addr = rdev->fence_drv[fence->ring].gpu_addr;  in evergreen_dma_fence_ring_emit()
    47  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));  in evergreen_dma_fence_ring_emit()
    48  radeon_ring_write(ring, addr & 0xfffffffc);  in evergreen_dma_fence_ring_emit()
    49  radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));  in evergreen_dma_fence_ring_emit()
    50  radeon_ring_write(ring, fence->seq);  in evergreen_dma_fence_ring_emit()
    52  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));  in evergreen_dma_fence_ring_emit()
    54  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));  in evergreen_dma_fence_ring_emit()
    55  radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));  in evergreen_dma_fence_ring_emit()
    56  radeon_ring_write(ring, 1);  in evergreen_dma_fence_ring_emit()
    [all …]

D | radeon_fence.c
    62  static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)  in radeon_fence_write() argument
    64  struct radeon_fence_driver *drv = &rdev->fence_drv[ring];  in radeon_fence_write()
    83  static u32 radeon_fence_read(struct radeon_device *rdev, int ring)  in radeon_fence_read() argument
    85  struct radeon_fence_driver *drv = &rdev->fence_drv[ring];  in radeon_fence_read()
    108  static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)  in radeon_fence_schedule_check() argument
    115  &rdev->fence_drv[ring].lockup_work,  in radeon_fence_schedule_check()
    131  int ring)  in radeon_fence_emit() argument
    133  u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];  in radeon_fence_emit()
    142  (*fence)->ring = ring;  in radeon_fence_emit()
    145  &rdev->fence_queue.lock, rdev->fence_context + ring, seq);  in radeon_fence_emit()
    [all …]

D | r600_dma.c
    52  struct radeon_ring *ring)  in r600_dma_get_rptr() argument
    57  rptr = rdev->wb.wb[ring->rptr_offs/4];  in r600_dma_get_rptr()
    73  struct radeon_ring *ring)  in r600_dma_get_wptr() argument
    87  struct radeon_ring *ring)  in r600_dma_set_wptr() argument
    89  WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);  in r600_dma_set_wptr()
    109  rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;  in r600_dma_stop()
    122  struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];  in r600_dma_resume() local
    131  rb_bufsz = order_base_2(ring->ring_size / 4);  in r600_dma_resume()
    151  WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);  in r600_dma_resume()
    167  ring->wptr = 0;  in r600_dma_resume()
    [all …]

D | uvd_v1_0.c
    40  struct radeon_ring *ring)  in uvd_v1_0_get_rptr() argument
    54  struct radeon_ring *ring)  in uvd_v1_0_get_wptr() argument
    68  struct radeon_ring *ring)  in uvd_v1_0_set_wptr() argument
    70  WREG32(UVD_RBC_RB_WPTR, ring->wptr);  in uvd_v1_0_set_wptr()
    84  struct radeon_ring *ring = &rdev->ring[fence->ring];  in uvd_v1_0_fence_emit() local
    85  uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;  in uvd_v1_0_fence_emit()
    87  radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));  in uvd_v1_0_fence_emit()
    88  radeon_ring_write(ring, addr & 0xffffffff);  in uvd_v1_0_fence_emit()
    89  radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));  in uvd_v1_0_fence_emit()
    90  radeon_ring_write(ring, fence->seq);  in uvd_v1_0_fence_emit()
    [all …]

D | vce_v1_0.c
    43  struct radeon_ring *ring)  in vce_v1_0_get_rptr() argument
    45  if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_get_rptr()
    60  struct radeon_ring *ring)  in vce_v1_0_get_wptr() argument
    62  if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_get_wptr()
    77  struct radeon_ring *ring)  in vce_v1_0_set_wptr() argument
    79  if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_set_wptr()
    80  WREG32(VCE_RB_WPTR, ring->wptr);  in vce_v1_0_set_wptr()
    82  WREG32(VCE_RB_WPTR2, ring->wptr);  in vce_v1_0_set_wptr()
    94  struct radeon_ring *ring;  in vce_v1_0_start() local
    100  ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];  in vce_v1_0_start()
    [all …]

D | cik_sdma.c
    64  struct radeon_ring *ring)  in cik_sdma_get_rptr() argument
    69  rptr = rdev->wb.wb[ring->rptr_offs/4];  in cik_sdma_get_rptr()
    71  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_get_rptr()
    91  struct radeon_ring *ring)  in cik_sdma_get_wptr() argument
    95  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_get_wptr()
    112  struct radeon_ring *ring)  in cik_sdma_set_wptr() argument
    116  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_set_wptr()
    121  WREG32(reg, (ring->wptr << 2) & 0x3fffc);  in cik_sdma_set_wptr()
    136  struct radeon_ring *ring = &rdev->ring[ib->ring];  in cik_sdma_ring_ib_execute() local
    137  u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;  in cik_sdma_ring_ib_execute()
    [all …]

D | ni_dma.c
    54  struct radeon_ring *ring)  in cayman_dma_get_rptr() argument
    59  rptr = rdev->wb.wb[ring->rptr_offs/4];  in cayman_dma_get_rptr()
    61  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_get_rptr()
    81  struct radeon_ring *ring)  in cayman_dma_get_wptr() argument
    85  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_get_wptr()
    102  struct radeon_ring *ring)  in cayman_dma_set_wptr() argument
    106  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_set_wptr()
    111  WREG32(reg, (ring->wptr << 2) & 0x3fffc);  in cayman_dma_set_wptr()
    125  struct radeon_ring *ring = &rdev->ring[ib->ring];  in cayman_dma_ring_ib_execute() local
    126  unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;  in cayman_dma_ring_ib_execute()
    [all …]

D | radeon_trace.h
    33  __field(u32, ring)
    39  __entry->ring = p->ring;
    42  p->rdev, p->ring);
    45  __entry->ring, __entry->dw,
    50  TP_PROTO(unsigned vmid, int ring),
    51  TP_ARGS(vmid, ring),
    54  __field(u32, ring)
    59  __entry->ring = ring;
    61  TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
    107  TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
    [all …]

D | uvd_v2_2.c
    42  struct radeon_ring *ring = &rdev->ring[fence->ring];  in uvd_v2_2_fence_emit() local
    43  uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;  in uvd_v2_2_fence_emit()
    45  radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));  in uvd_v2_2_fence_emit()
    46  radeon_ring_write(ring, fence->seq);  in uvd_v2_2_fence_emit()
    47  radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));  in uvd_v2_2_fence_emit()
    48  radeon_ring_write(ring, lower_32_bits(addr));  in uvd_v2_2_fence_emit()
    49  radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));  in uvd_v2_2_fence_emit()
    50  radeon_ring_write(ring, upper_32_bits(addr) & 0xff);  in uvd_v2_2_fence_emit()
    51  radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));  in uvd_v2_2_fence_emit()
    52  radeon_ring_write(ring, 0);  in uvd_v2_2_fence_emit()
    [all …]

D | si_dma.c
    41  bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)  in si_dma_is_lockup() argument
    46  if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in si_dma_is_lockup()
    52  radeon_ring_lockup_update(rdev, ring);  in si_dma_is_lockup()
    55  return radeon_ring_test_lockup(rdev, ring);  in si_dma_is_lockup()
    187  void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,  in si_dma_vm_flush() argument
    191  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));  in si_dma_vm_flush()
    193  radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));  in si_dma_vm_flush()
    195  …radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> …  in si_dma_vm_flush()
    197  radeon_ring_write(ring, pd_addr >> 12);  in si_dma_vm_flush()
    200  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));  in si_dma_vm_flush()
    [all …]

D | ni.c
    1372  int ring, u32 cp_int_cntl)  in cayman_cp_int_cntl_setup() argument
    1376  WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));  in cayman_cp_int_cntl_setup()
    1386  struct radeon_ring *ring = &rdev->ring[fence->ring];  in cayman_fence_ring_emit() local
    1387  u64 addr = rdev->fence_drv[fence->ring].gpu_addr;  in cayman_fence_ring_emit()
    1392  radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));  in cayman_fence_ring_emit()
    1393  radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);  in cayman_fence_ring_emit()
    1394  radeon_ring_write(ring, 0xFFFFFFFF);  in cayman_fence_ring_emit()
    1395  radeon_ring_write(ring, 0);  in cayman_fence_ring_emit()
    1396  radeon_ring_write(ring, 10); /* poll interval */  in cayman_fence_ring_emit()
    1398  radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));  in cayman_fence_ring_emit()
    [all …]

D | radeon_ib.c
    55  int radeon_ib_get(struct radeon_device *rdev, int ring,  in radeon_ib_get() argument
    69  ib->ring = ring;  in radeon_ib_get()
    125  struct radeon_ring *ring = &rdev->ring[ib->ring];  in radeon_ib_schedule() local
    128  if (!ib->length_dw || !ring->ready) {  in radeon_ib_schedule()
    135  r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);  in radeon_ib_schedule()
    144  vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);  in radeon_ib_schedule()
    149  r = radeon_sync_rings(rdev, &ib->sync, ib->ring);  in radeon_ib_schedule()
    152  radeon_ring_unlock_undo(rdev, ring);  in radeon_ib_schedule()
    157  radeon_vm_flush(rdev, ib->vm, ib->ring,  in radeon_ib_schedule()
    161  radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);  in radeon_ib_schedule()
    [all …]

D | rv770_dma.c
    50  struct radeon_ring *ring = &rdev->ring[ring_index];  in rv770_copy_dma() local
    59  r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);  in rv770_copy_dma()
    67  radeon_sync_rings(rdev, &sync, ring->idx);  in rv770_copy_dma()
    74  radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));  in rv770_copy_dma()
    75  radeon_ring_write(ring, dst_offset & 0xfffffffc);  in rv770_copy_dma()
    76  radeon_ring_write(ring, src_offset & 0xfffffffc);  in rv770_copy_dma()
    77  radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);  in rv770_copy_dma()
    78  radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);  in rv770_copy_dma()
    83  r = radeon_fence_emit(rdev, &fence, ring->idx);  in rv770_copy_dma()
    85  radeon_ring_unlock_undo(rdev, ring);  in rv770_copy_dma()
    [all …]

D | radeon_vce.c
    328  int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,  in radeon_vce_get_create_msg() argument
    336  r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);  in radeon_vce_get_create_msg()
    395  int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,  in radeon_vce_get_destroy_msg() argument
    403  r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);  in radeon_vce_get_destroy_msg()
    678  struct radeon_ring *ring,  in radeon_vce_semaphore_emit() argument
    684  radeon_ring_write(ring, VCE_CMD_SEMAPHORE);  in radeon_vce_semaphore_emit()
    685  radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);  in radeon_vce_semaphore_emit()
    686  radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);  in radeon_vce_semaphore_emit()
    687  radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));  in radeon_vce_semaphore_emit()
    689  radeon_ring_write(ring, VCE_CMD_END);  in radeon_vce_semaphore_emit()
    [all …]

D | uvd_v3_1.c
    41  struct radeon_ring *ring,  in uvd_v3_1_semaphore_emit() argument
    47  radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));  in uvd_v3_1_semaphore_emit()
    48  radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);  in uvd_v3_1_semaphore_emit()
    50  radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));  in uvd_v3_1_semaphore_emit()
    51  radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);  in uvd_v3_1_semaphore_emit()
    53  radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));  in uvd_v3_1_semaphore_emit()
    54  radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));  in uvd_v3_1_semaphore_emit()

D | radeon_sync.c
    71  other = sync->sync_to[fence->ring];  in radeon_sync_fence()
    72  sync->sync_to[fence->ring] = radeon_fence_later(fence, other);  in radeon_sync_fence()
    139  int ring)  in radeon_sync_rings() argument
    149  if (!radeon_fence_need_sync(fence, ring))  in radeon_sync_rings()
    153  if (!rdev->ring[i].ready) {  in radeon_sync_rings()
    172  r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);  in radeon_sync_rings()
    179  radeon_ring_undo(&rdev->ring[i]);  in radeon_sync_rings()
    187  if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {  in radeon_sync_rings()
    189  radeon_ring_undo(&rdev->ring[i]);  in radeon_sync_rings()
    196  radeon_ring_commit(rdev, &rdev->ring[i], false);  in radeon_sync_rings()
    [all …]

D | r300.c
    186  struct radeon_ring *ring = &rdev->ring[fence->ring];  in r300_fence_ring_emit() local
    191  radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));  in r300_fence_ring_emit()
    192  radeon_ring_write(ring, 0);  in r300_fence_ring_emit()
    193  radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));  in r300_fence_ring_emit()
    194  radeon_ring_write(ring, 0);  in r300_fence_ring_emit()
    196  radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));  in r300_fence_ring_emit()
    197  radeon_ring_write(ring, R300_RB3D_DC_FLUSH);  in r300_fence_ring_emit()
    198  radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));  in r300_fence_ring_emit()
    199  radeon_ring_write(ring, R300_ZC_FLUSH);  in r300_fence_ring_emit()
    201  radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));  in r300_fence_ring_emit()
    [all …]

D | radeon_semaphore.c
    61  struct radeon_ring *ring = &rdev->ring[ridx];  in radeon_semaphore_emit_signal() local
    65  if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {  in radeon_semaphore_emit_signal()
    69  ring->last_semaphore_signal_addr = semaphore->gpu_addr;  in radeon_semaphore_emit_signal()
    78  struct radeon_ring *ring = &rdev->ring[ridx];  in radeon_semaphore_emit_wait() local
    82  if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {  in radeon_semaphore_emit_wait()
    86  ring->last_semaphore_wait_addr = semaphore->gpu_addr;  in radeon_semaphore_emit_wait()

D | cik.c
    3842  int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  in cik_ring_test() argument
    3855  r = radeon_ring_lock(rdev, ring, 3);  in cik_ring_test()
    3857  DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);  in cik_ring_test()
    3861  radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));  in cik_ring_test()
    3862  radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));  in cik_ring_test()
    3863  radeon_ring_write(ring, 0xDEADBEEF);  in cik_ring_test()
    3864  radeon_ring_unlock_commit(rdev, ring, false);  in cik_ring_test()
    3873  DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);  in cik_ring_test()
    3876  ring->idx, scratch, tmp);  in cik_ring_test()
    3894  struct radeon_ring *ring = &rdev->ring[ridx];  in cik_hdp_flush_cp_ring_emit() local
    [all …]

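cik_ring_test() above follows a common ring-bringup pattern: poison a scratch register, emit a write of 0xDEADBEEF through the ring, then poll with a timeout. A user-space model with a stub consumer in place of the GPU; the packet layout here is invented, only the magic values come from the snippet:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t scratch;                 /* stands in for a GPU scratch reg */

/* Stub "GPU": consume (reg, value) pairs; reg 1 means the scratch reg. */
static void consume_ring(const uint32_t *ring, int n)
{
    for (int i = 0; i + 1 < n; i += 2)
        if (ring[i] == 1)
            scratch = ring[i + 1];
}

int main(void)
{
    uint32_t ring[2] = { 1, 0xDEADBEEF }; /* SET_REG-style packet */
    scratch = 0xCAFEDEAD;                 /* poison, like the driver does */
    consume_ring(ring, 2);

    for (int i = 0; i < 1000; i++)        /* poll with timeout */
        if (scratch == 0xDEADBEEF) {
            printf("ring test succeeded after %d polls\n", i);
            return 0;
        }
    printf("ring test failed (scratch=%#x)\n", scratch);
    return 1;
}
```
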
D | si.c
    3371  struct radeon_ring *ring = &rdev->ring[fence->ring];  in si_fence_ring_emit() local
    3372  u64 addr = rdev->fence_drv[fence->ring].gpu_addr;  in si_fence_ring_emit()
    3375  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));  in si_fence_ring_emit()
    3376  radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);  in si_fence_ring_emit()
    3377  radeon_ring_write(ring, 0);  in si_fence_ring_emit()
    3378  radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));  in si_fence_ring_emit()
    3379  radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |  in si_fence_ring_emit()
    3383  radeon_ring_write(ring, 0xFFFFFFFF);  in si_fence_ring_emit()
    3384  radeon_ring_write(ring, 0);  in si_fence_ring_emit()
    3385  radeon_ring_write(ring, 10); /* poll interval */  in si_fence_ring_emit()
    [all …]

D | radeon_asic.h
    73  void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
    113  int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
    147  struct radeon_ring *ring);
    149  struct radeon_ring *ring);
    151  struct radeon_ring *ring);
    171  extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
    287  void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
    331  struct radeon_ring *ring,
    335  bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
    342  int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
    [all …]

D | radeon_test.c
    40  int i, r, ring;  in radeon_do_test_moves() local
    44  ring = radeon_copy_dma_ring_index(rdev);  in radeon_do_test_moves()
    47  ring = radeon_copy_blit_ring_index(rdev);  in radeon_do_test_moves()
    119  if (ring == R600_RING_TYPE_DMA_INDEX)  in radeon_do_test_moves()
    170  if (ring == R600_RING_TYPE_DMA_INDEX)  in radeon_do_test_moves()
    262  struct radeon_ring *ring,  in radeon_test_create_and_emit_fence() argument
    265  uint32_t handle = ring->idx ^ 0xdeafbeef;  in radeon_test_create_and_emit_fence()
    268  if (ring->idx == R600_RING_TYPE_UVD_INDEX) {  in radeon_test_create_and_emit_fence()
    269  r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);  in radeon_test_create_and_emit_fence()
    275  r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);  in radeon_test_create_and_emit_fence()
    [all …]

D | radeon_cs.c
    124  if (p->ring == R600_RING_TYPE_UVD_INDEX &&  in radeon_cs_parser_relocs()
    182  r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);  in radeon_cs_parser_relocs()
    190  static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)  in radeon_cs_get_ring() argument
    194  switch (ring) {  in radeon_cs_get_ring()
    196  DRM_ERROR("unknown ring id: %d\n", ring);  in radeon_cs_get_ring()
    199  p->ring = RADEON_RING_TYPE_GFX_INDEX;  in radeon_cs_get_ring()
    204  p->ring = CAYMAN_RING_TYPE_CP1_INDEX;  in radeon_cs_get_ring()
    206  p->ring = CAYMAN_RING_TYPE_CP2_INDEX;  in radeon_cs_get_ring()
    208  p->ring = RADEON_RING_TYPE_GFX_INDEX;  in radeon_cs_get_ring()
    213  p->ring = R600_RING_TYPE_DMA_INDEX;  in radeon_cs_get_ring()
    [all …]

D | r600.c
    1862  bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)  in r600_gfx_is_lockup() argument
    1869  radeon_ring_lockup_update(rdev, ring);  in r600_gfx_is_lockup()
    1872  return radeon_ring_test_lockup(rdev, ring);  in r600_gfx_is_lockup()
    2370  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;  in r600_cp_stop()
    2565  struct radeon_ring *ring)  in r600_gfx_get_rptr() argument
    2570  rptr = rdev->wb.wb[ring->rptr_offs/4];  in r600_gfx_get_rptr()
    2578  struct radeon_ring *ring)  in r600_gfx_get_wptr() argument
    2588  struct radeon_ring *ring)  in r600_gfx_set_wptr() argument
    2590  WREG32(R600_CP_RB_WPTR, ring->wptr);  in r600_gfx_set_wptr()
    2638  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in r600_cp_start() local
    [all …]

D | rv515.c
    62  void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)  in rv515_ring_start() argument
    66  r = radeon_ring_lock(rdev, ring, 64);  in rv515_ring_start()
    70  radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));  in rv515_ring_start()
    71  radeon_ring_write(ring,  in rv515_ring_start()
    76  radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));  in rv515_ring_start()
    77  radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);  in rv515_ring_start()
    78  radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));  in rv515_ring_start()
    79  radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);  in rv515_ring_start()
    80  radeon_ring_write(ring, PACKET0(GB_SELECT, 0));  in rv515_ring_start()
    81  radeon_ring_write(ring, 0);  in rv515_ring_start()
    [all …]

D | r100.c
    839  static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)  in r100_ring_hdp_flush() argument
    841  radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));  in r100_ring_hdp_flush()
    842  radeon_ring_write(ring, rdev->config.r100.hdp_cntl |  in r100_ring_hdp_flush()
    844  radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));  in r100_ring_hdp_flush()
    845  radeon_ring_write(ring, rdev->config.r100.hdp_cntl);  in r100_ring_hdp_flush()
    853  struct radeon_ring *ring = &rdev->ring[fence->ring];  in r100_fence_ring_emit() local
    857  radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  in r100_fence_ring_emit()
    858  radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);  in r100_fence_ring_emit()
    859  radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));  in r100_fence_ring_emit()
    860  radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);  in r100_fence_ring_emit()
    [all …]

D | r420.c
    209  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in r420_cp_errata_init() local
    218  radeon_ring_lock(rdev, ring, 8);  in r420_cp_errata_init()
    219  radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));  in r420_cp_errata_init()
    220  radeon_ring_write(ring, rdev->config.r300.resync_scratch);  in r420_cp_errata_init()
    221  radeon_ring_write(ring, 0xDEADBEEF);  in r420_cp_errata_init()
    222  radeon_ring_unlock_commit(rdev, ring, false);  in r420_cp_errata_init()
    227  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in r420_cp_errata_fini() local
    232  radeon_ring_lock(rdev, ring, 8);  in r420_cp_errata_fini()
    233  radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));  in r420_cp_errata_fini()
    234  radeon_ring_write(ring, R300_RB3D_DC_FINISH);  in r420_cp_errata_fini()
    [all …]

D | r200.c
    89  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in r200_copy_dma() local
    99  r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);  in r200_copy_dma()
    105  radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));  in r200_copy_dma()
    106  radeon_ring_write(ring, (1 << 16));  in r200_copy_dma()
    113  radeon_ring_write(ring, PACKET0(0x720, 2));  in r200_copy_dma()
    114  radeon_ring_write(ring, src_offset);  in r200_copy_dma()
    115  radeon_ring_write(ring, dst_offset);  in r200_copy_dma()
    116  radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));  in r200_copy_dma()
    120  radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));  in r200_copy_dma()
    121  radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);  in r200_copy_dma()
    [all …]

D | radeon.h
    372  unsigned ring;  member
    378  int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
    381  void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
    382  int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
    383  void radeon_fence_process(struct radeon_device *rdev, int ring);
    386  int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
    387  int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
    393  unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
    394  bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
    395  void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
    [all …]

D | radeon_cp.c
    654  dev_priv->ring.tail = cur_read_ptr;  in radeon_do_cp_reset()
    792  dev_priv->ring.tail = cur_read_ptr;  in radeon_cp_init_ring_buffer()
    812  (dev_priv->ring.fetch_size_l2ow << 18) |  in radeon_cp_init_ring_buffer()
    813  (dev_priv->ring.rptr_update_l2qw << 8) |  in radeon_cp_init_ring_buffer()
    814  dev_priv->ring.size_l2qw);  in radeon_cp_init_ring_buffer()
    817  (dev_priv->ring.fetch_size_l2ow << 18) |  in radeon_cp_init_ring_buffer()
    818  (dev_priv->ring.rptr_update_l2qw << 8) |  in radeon_cp_init_ring_buffer()
    819  dev_priv->ring.size_l2qw);  in radeon_cp_init_ring_buffer()
    1443  dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;  in radeon_do_init_cp()
    1444  dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle  in radeon_do_init_cp()
    [all …]

D | evergreen.c
    140  int ring, u32 cp_int_cntl);
    2973  struct radeon_ring *ring = &rdev->ring[ib->ring];  in evergreen_ring_ib_execute() local
    2977  radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));  in evergreen_ring_ib_execute()
    2978  radeon_ring_write(ring, 1);  in evergreen_ring_ib_execute()
    2980  if (ring->rptr_save_reg) {  in evergreen_ring_ib_execute()
    2981  next_rptr = ring->wptr + 3 + 4;  in evergreen_ring_ib_execute()
    2982  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));  in evergreen_ring_ib_execute()
    2983  radeon_ring_write(ring, ((ring->rptr_save_reg -  in evergreen_ring_ib_execute()
    2985  radeon_ring_write(ring, next_rptr);  in evergreen_ring_ib_execute()
    2987  next_rptr = ring->wptr + 5 + 4;  in evergreen_ring_ib_execute()
    [all …]

D | rv770.c
    1083  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;  in r700_cp_stop()
    1126  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in r700_cp_fini() local
    1128  radeon_ring_fini(rdev, ring);  in r700_cp_fini()
    1129  radeon_scratch_free(rdev, ring->rptr_save_reg);  in r700_cp_fini()
    1686  struct radeon_ring *ring;  in rv770_startup() local
    1735  rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;  in rv770_startup()
    1752  ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  in rv770_startup()
    1753  r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,  in rv770_startup()
    1758  ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];  in rv770_startup()
    1759  r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,  in rv770_startup()
    [all …]

D | radeon_vm.c
    178  struct radeon_vm *vm, int ring)  in radeon_vm_grab_id() argument
    181  struct radeon_vm_id *vm_id = &vm->ids[ring];  in radeon_vm_grab_id()
    201  trace_radeon_vm_grab_id(i, ring);  in radeon_vm_grab_id()
    205  if (radeon_fence_is_earlier(fence, best[fence->ring])) {  in radeon_vm_grab_id()
    206  best[fence->ring] = fence;  in radeon_vm_grab_id()
    207  choices[fence->ring == ring ? 0 : 1] = i;  in radeon_vm_grab_id()
    214  trace_radeon_vm_grab_id(choices[i], ring);  in radeon_vm_grab_id()
    238  int ring, struct radeon_fence *updates)  in radeon_vm_flush() argument
    241  struct radeon_vm_id *vm_id = &vm->ids[ring];  in radeon_vm_flush()
    246  trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);  in radeon_vm_flush()
    [all …]

D | radeon_irq_kms.c
    339  void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)  in radeon_irq_kms_sw_irq_get() argument
    346  if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {  in radeon_irq_kms_sw_irq_get()
    363  bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)  in radeon_irq_kms_sw_irq_get_delayed() argument
    365  return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;  in radeon_irq_kms_sw_irq_get_delayed()
    378  void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)  in radeon_irq_kms_sw_irq_put() argument
    385  if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {  in radeon_irq_kms_sw_irq_put()

/linux-4.1.27/drivers/net/wireless/b43legacy/

D | dma.c
    45  struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,  in op32_idx2desc() argument
    51  *meta = &(ring->meta[slot]);  in op32_idx2desc()
    52  desc = ring->descbase;  in op32_idx2desc()
    58  static void op32_fill_descriptor(struct b43legacy_dmaring *ring,  in op32_fill_descriptor() argument
    63  struct b43legacy_dmadesc32 *descbase = ring->descbase;  in op32_fill_descriptor()
    70  B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));  in op32_fill_descriptor()
    75  addr |= ring->dev->dma.translation;  in op32_fill_descriptor()
    76  ctl = (bufsize - ring->frameoffset)  in op32_fill_descriptor()
    78  if (slot == ring->nr_slots - 1)  in op32_fill_descriptor()
    93  static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)  in op32_poke_tx() argument
    [all …]

D | dma.h
    167  u32 b43legacy_dma_read(struct b43legacy_dmaring *ring,  in b43legacy_dma_read() argument
    170  return b43legacy_read32(ring->dev, ring->mmio_base + offset);  in b43legacy_dma_read()
    174  void b43legacy_dma_write(struct b43legacy_dmaring *ring,  in b43legacy_dma_write() argument
    177  b43legacy_write32(ring->dev, ring->mmio_base + offset, value);  in b43legacy_dma_write()
    192  void b43legacy_dma_rx(struct b43legacy_dmaring *ring);
    218  void b43legacy_dma_rx(struct b43legacy_dmaring *ring)  in b43legacy_dma_rx() argument

/linux-4.1.27/drivers/net/wireless/b43/

D | dma.c
    85  struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,  in op32_idx2desc() argument
    91  *meta = &(ring->meta[slot]);  in op32_idx2desc()
    92  desc = ring->descbase;  in op32_idx2desc()
    98  static void op32_fill_descriptor(struct b43_dmaring *ring,  in op32_fill_descriptor() argument
    103  struct b43_dmadesc32 *descbase = ring->descbase;  in op32_fill_descriptor()
    110  B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));  in op32_fill_descriptor()
    112  addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);  in op32_fill_descriptor()
    113  addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);  in op32_fill_descriptor()
    116  if (slot == ring->nr_slots - 1)  in op32_fill_descriptor()
    131  static void op32_poke_tx(struct b43_dmaring *ring, int slot)  in op32_poke_tx() argument
    [all …]

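op32_fill_descriptor() in both b43 drivers tags the descriptor in the last ring slot so the DMA engine wraps back to slot 0. A model of that "ring end" marking; the control-bit layout below is assumed for illustration, not b43's real B43_DMA32_* masks:

```c
#include <stdint.h>
#include <stdio.h>

#define DCTL_RING_END (1u << 28)   /* assumed "end of ring" control bit */

struct desc32 { uint32_t control, address; };

static void fill_descriptor(struct desc32 *descbase, int nr_slots, int slot,
                            uint32_t dmaaddr, uint32_t bufsize)
{
    uint32_t ctl = bufsize & 0x1FFF;     /* byte count in the low bits */

    if (slot == nr_slots - 1)
        ctl |= DCTL_RING_END;            /* tell the DMA engine to wrap */

    descbase[slot].address = dmaaddr;
    descbase[slot].control = ctl;
}

int main(void)
{
    struct desc32 ring[4];
    for (int i = 0; i < 4; i++)
        fill_descriptor(ring, 4, i, 0x1000 + i * 0x800, 0x800);
    printf("last ctl=%#x\n", ring[3].control); /* has DCTL_RING_END set */
    return 0;
}
```
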
D | dma.h
    198  struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
    202  void (*fill_descriptor) (struct b43_dmaring * ring,
    206  void (*poke_tx) (struct b43_dmaring * ring, int slot);
    207  void (*tx_suspend) (struct b43_dmaring * ring);
    208  void (*tx_resume) (struct b43_dmaring * ring);
    209  int (*get_current_rxslot) (struct b43_dmaring * ring);
    210  void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
    277  static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)  in b43_dma_read() argument
    279  return b43_read32(ring->dev, ring->mmio_base + offset);  in b43_dma_read()
    282  static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)  in b43_dma_write() argument
    [all …]

/linux-4.1.27/net/rds/

D | ib_ring.c
    66  void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr)  in rds_ib_ring_init() argument
    68  memset(ring, 0, sizeof(*ring));  in rds_ib_ring_init()
    69  ring->w_nr = nr;  in rds_ib_ring_init()
    70  rdsdebug("ring %p nr %u\n", ring, ring->w_nr);  in rds_ib_ring_init()
    73  static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring)  in __rds_ib_ring_used() argument
    78  diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);  in __rds_ib_ring_used()
    79  BUG_ON(diff > ring->w_nr);  in __rds_ib_ring_used()
    84  void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr)  in rds_ib_ring_resize() argument
    88  BUG_ON(__rds_ib_ring_used(ring));  in rds_ib_ring_resize()
    89  ring->w_nr = nr;  in rds_ib_ring_resize()
    [all …]

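__rds_ib_ring_used() relies on two free-running u32 counters whose wrap-safe unsigned difference is the ring occupancy. A standalone model (the struct and function names are stand-ins for the rds_ib_* ones, and assert() plays the role of BUG_ON()):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct work_ring {
    uint32_t w_nr;         /* ring capacity */
    uint32_t w_alloc_ctr;  /* bumped by the allocator (producer side) */
    uint32_t w_free_ctr;   /* bumped on completion (consumer side) */
};

static uint32_t ring_used(const struct work_ring *r)
{
    uint32_t diff = r->w_alloc_ctr - r->w_free_ctr;  /* wrap-safe in u32 */
    assert(diff <= r->w_nr);                         /* BUG_ON() in the kernel */
    return diff;
}

int main(void)
{
    struct work_ring r = { .w_nr = 64,
                           .w_alloc_ctr = 5,          /* wrapped past 0 */
                           .w_free_ctr  = 0xFFFFFFFDu };
    printf("used: %u\n", ring_used(&r));              /* 5 - (-3) = 8 */
    return 0;
}
```
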
D | iw_ring.c
    66  void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr)  in rds_iw_ring_init() argument
    68  memset(ring, 0, sizeof(*ring));  in rds_iw_ring_init()
    69  ring->w_nr = nr;  in rds_iw_ring_init()
    70  rdsdebug("ring %p nr %u\n", ring, ring->w_nr);  in rds_iw_ring_init()
    73  static inline u32 __rds_iw_ring_used(struct rds_iw_work_ring *ring)  in __rds_iw_ring_used() argument
    78  diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);  in __rds_iw_ring_used()
    79  BUG_ON(diff > ring->w_nr);  in __rds_iw_ring_used()
    84  void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr)  in rds_iw_ring_resize() argument
    88  BUG_ON(__rds_iw_ring_used(ring));  in rds_iw_ring_resize()
    89  ring->w_nr = nr;  in rds_iw_ring_resize()
    [all …]

D | ib.h
    330  void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
    331  void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
    332  u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
    333  void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
    334  void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
    335  int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
    336  int rds_ib_ring_low(struct rds_ib_work_ring *ring);
    337  u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
    338  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);

D | iw.h
    339  void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
    340  void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
    341  u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
    342  void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
    343  void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
    344  int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
    345  int rds_iw_ring_low(struct rds_iw_work_ring *ring);
    346  u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
    347  u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/

D | en_tx.c
    53  struct mlx4_en_tx_ring *ring;  in mlx4_en_create_tx_ring() local
    57  ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);  in mlx4_en_create_tx_ring()
    58  if (!ring) {  in mlx4_en_create_tx_ring()
    59  ring = kzalloc(sizeof(*ring), GFP_KERNEL);  in mlx4_en_create_tx_ring()
    60  if (!ring) {  in mlx4_en_create_tx_ring()
    66  ring->size = size;  in mlx4_en_create_tx_ring()
    67  ring->size_mask = size - 1;  in mlx4_en_create_tx_ring()
    68  ring->stride = stride;  in mlx4_en_create_tx_ring()
    69  ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;  in mlx4_en_create_tx_ring()
    72  ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);  in mlx4_en_create_tx_ring()
    [all …]

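The create path above tries a NUMA-node-local allocation first and falls back to an ordinary one, then derives size_mask from a power-of-two size. A sketch with calloc() standing in for kzalloc() and a stub alloc_on_node() that always fails, just to exercise the fallback:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kzalloc_node(): pretend node-local memory is exhausted. */
static void *alloc_on_node(size_t size, int node)
{
    (void)size; (void)node;
    return NULL;
}

struct tx_ring { int size, size_mask, stride; };

static struct tx_ring *create_tx_ring(int size, int stride, int node)
{
    struct tx_ring *ring = alloc_on_node(sizeof(*ring), node);
    if (!ring) {
        ring = calloc(1, sizeof(*ring));   /* fallback, like plain kzalloc() */
        if (!ring)
            return NULL;
    }
    ring->size = size;                     /* must be a power of two */
    ring->size_mask = size - 1;            /* cheap modulo for ring indices */
    ring->stride = stride;
    return ring;
}

int main(void)
{
    struct tx_ring *r = create_tx_ring(1024, 64, 0);
    printf("size_mask=%#x\n", r ? r->size_mask : 0);
    free(r);
    return 0;
}
```
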
D | en_rx.c
    154  struct mlx4_en_rx_ring *ring)  in mlx4_en_init_allocator() argument
    162  if (mlx4_alloc_pages(priv, &ring->page_alloc[i],  in mlx4_en_init_allocator()
    167  i, ring->page_alloc[i].page_size,  in mlx4_en_init_allocator()
    168  atomic_read(&ring->page_alloc[i].page->_count));  in mlx4_en_init_allocator()
    176  page_alloc = &ring->page_alloc[i];  in mlx4_en_init_allocator()
    188  struct mlx4_en_rx_ring *ring)  in mlx4_en_destroy_allocator() argument
    196  page_alloc = &ring->page_alloc[i];  in mlx4_en_destroy_allocator()
    212  struct mlx4_en_rx_ring *ring, int index)  in mlx4_en_init_rx_desc() argument
    214  struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;  in mlx4_en_init_rx_desc()
    228  possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;  in mlx4_en_init_rx_desc()
    [all …]

D | en_cq.c
    48  int entries, int ring, enum cq_type mode,  in mlx4_en_create_cq() argument
    67  cq->ring = ring;  in mlx4_en_create_cq()
    122  cq->ring);  in mlx4_en_activate_cq()
    126  cq->vector = (cq->ring + 1 + priv->port)  in mlx4_en_activate_cq()
    134  cq->vector = (cq->ring + 1 + priv->port) %  in mlx4_en_activate_cq()
    152  cq->size = priv->rx_ring[cq->ring]->actual_size;  in mlx4_en_activate_cq()
    171  struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];  in mlx4_en_activate_cq() local
    174  ring->affinity_mask);  in mlx4_en_activate_cq()

/linux-4.1.27/drivers/crypto/qat/qat_common/

D | adf_transport.c
    80  static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_reserve_ring() argument
    83  if (bank->ring_mask & (1 << ring)) {  in adf_reserve_ring()
    87  bank->ring_mask |= (1 << ring);  in adf_reserve_ring()
    92  static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_unreserve_ring() argument
    95  bank->ring_mask &= ~(1 << ring);  in adf_unreserve_ring()
    99  static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_enable_ring_irq() argument
    102  bank->irq_mask |= (1 << ring);  in adf_enable_ring_irq()
    109  static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_disable_ring_irq() argument
    112  bank->irq_mask &= ~(1 << ring);  in adf_disable_ring_irq()
    117  int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)  in adf_send_message() argument
    [all …]

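adf_reserve_ring()/adf_unreserve_ring() above manage a per-bank bitmask of rings in use, failing the reservation when the bit is already set. A lock-free user-space model (the real driver takes the bank lock around the test-and-set):

```c
#include <stdint.h>
#include <stdio.h>

struct bank { uint32_t ring_mask; };

static int reserve_ring(struct bank *b, uint32_t ring)
{
    if (b->ring_mask & (1u << ring))
        return -1;                  /* already reserved (-EFAULT in the driver) */
    b->ring_mask |= (1u << ring);
    return 0;
}

static void unreserve_ring(struct bank *b, uint32_t ring)
{
    b->ring_mask &= ~(1u << ring);
}

int main(void)
{
    struct bank b = { 0 };
    printf("%d %d\n", reserve_ring(&b, 3), reserve_ring(&b, 3)); /* 0 -1 */
    unreserve_ring(&b, 3);
    printf("%d\n", reserve_ring(&b, 3));                         /* 0 again */
    return 0;
}
```
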
D | adf_transport_debug.c
    59  struct adf_etr_ring_data *ring = sfile->private;  in adf_ring_start() local
    65  if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /  in adf_ring_start()
    66  ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))  in adf_ring_start()
    69  return ring->base_addr +  in adf_ring_start()
    70  (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);  in adf_ring_start()
    75  struct adf_etr_ring_data *ring = sfile->private;  in adf_ring_next() local
    77  if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /  in adf_ring_next()
    78  ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))  in adf_ring_next()
    81  return ring->base_addr +  in adf_ring_next()
    82  (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);  in adf_ring_next()
    [all …]

D | adf_transport_access_macros.h
    118  #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \  argument
    120  ADF_RING_CSR_RING_HEAD + (ring << 2))
    121  #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \  argument
    123  ADF_RING_CSR_RING_TAIL + (ring << 2))
    127  #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \  argument
    129  ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
    130  #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \  argument
    136  ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
    138  ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
    140  #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \  argument
    [all …]

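WRITE_CSR_RING_BASE above splits a 64-bit ring base into two 32-bit CSR writes, with each ring's registers strided by ring << 2. A model using an array as the CSR window; the 0x040/0x080 offsets are illustrative, not taken from the QAT datasheet:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_LBASE 0x040               /* illustrative CSR offsets */
#define RING_UBASE 0x080

static uint32_t csr[0x100];            /* fake device register window */
#define CSR_WR(off, val) (csr[(off) >> 2] = (val))
#define CSR_RD(off)      (csr[(off) >> 2])

static void write_ring_base(uint32_t ring, uint64_t base)
{
    CSR_WR(RING_LBASE + (ring << 2), (uint32_t)base);          /* low 32 bits */
    CSR_WR(RING_UBASE + (ring << 2), (uint32_t)(base >> 32));  /* high 32 bits */
}

int main(void)
{
    write_ring_base(2, 0x1234567890ULL);
    printf("lo=%#x hi=%#x\n",
           CSR_RD(RING_LBASE + (2 << 2)), CSR_RD(RING_UBASE + (2 << 2)));
    return 0;
}
```
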
D | adf_transport_internal.h
    99  int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
    100  void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
    109  static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,  in adf_ring_debugfs_add() argument
    115  #define adf_ring_debugfs_rm(ring) do {} while (0)  argument

D | adf_transport.h
    61  int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
    62  void adf_remove_ring(struct adf_etr_ring_data *ring);

/linux-4.1.27/tools/testing/selftests/net/

D | psock_tpacket.c
    78  struct ring {  struct
    83  void (*walk)(int sock, struct ring *ring);  argument
    232  static void walk_v1_v2_rx(int sock, struct ring *ring)  in walk_v1_v2_rx() argument
    239  bug_on(ring->type != PACKET_RX_RING);  in walk_v1_v2_rx()
    252  while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,  in walk_v1_v2_rx()
    253  ring->version)) {  in walk_v1_v2_rx()
    254  ppd.raw = ring->rd[frame_num].iov_base;  in walk_v1_v2_rx()
    256  switch (ring->version) {  in walk_v1_v2_rx()
    273  __v1_v2_rx_user_ready(ppd.raw, ring->version);  in walk_v1_v2_rx()
    275  frame_num = (frame_num + 1) % ring->rd_num;  in walk_v1_v2_rx()
    [all …]

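walk_v1_v2_rx() above polls per-frame status words in the mmap()ed ring and returns each frame to the kernel after processing. A reduced model of that handoff; the flag values mirror <linux/if_packet.h>, but the ring here is just an in-process array, not a real PACKET_RX_RING:

```c
#include <stdint.h>
#include <stdio.h>

#define TP_STATUS_KERNEL 0          /* frame owned by the kernel */
#define TP_STATUS_USER   (1 << 0)   /* frame ready for user space */
#define NR_FRAMES 4

static volatile uint32_t status[NR_FRAMES] = {
    TP_STATUS_USER, TP_STATUS_USER, TP_STATUS_KERNEL, TP_STATUS_KERNEL
};

int main(void)
{
    unsigned frame = 0, received = 0;

    while (status[frame] & TP_STATUS_USER) {   /* kernel marked it ready */
        received++;                            /* ...process the packet... */
        status[frame] = TP_STATUS_KERNEL;      /* hand the frame back */
        frame = (frame + 1) % NR_FRAMES;       /* same wrap as the selftest */
    }
    printf("consumed %u frames, next slot %u\n", received, frame);
    return 0;
}
```
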
D | psock_fanout.c
    102  char *ring;  in sock_fanout_open_ring() local
    116  ring = mmap(0, req.tp_block_size * req.tp_block_nr,  in sock_fanout_open_ring()
    118  if (!ring) {  in sock_fanout_open_ring()
    123  return ring;  in sock_fanout_open_ring()
    126  static int sock_fanout_read_ring(int fd, void *ring)  in sock_fanout_read_ring() argument
    128  struct tpacket2_hdr *header = ring;  in sock_fanout_read_ring()
    133  header = ring + (count * getpagesize());  in sock_fanout_read_ring()

/linux-4.1.27/drivers/gpu/drm/msm/

D | msm_ringbuffer.c
    23  struct msm_ringbuffer *ring;  in msm_ringbuffer_new() local
    28  ring = kzalloc(sizeof(*ring), GFP_KERNEL);  in msm_ringbuffer_new()
    29  if (!ring) {  in msm_ringbuffer_new()
    34  ring->gpu = gpu;  in msm_ringbuffer_new()
    35  ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);  in msm_ringbuffer_new()
    36  if (IS_ERR(ring->bo)) {  in msm_ringbuffer_new()
    37  ret = PTR_ERR(ring->bo);  in msm_ringbuffer_new()
    38  ring->bo = NULL;  in msm_ringbuffer_new()
    42  ring->start = msm_gem_vaddr_locked(ring->bo);  in msm_ringbuffer_new()
    43  ring->end = ring->start + (size / 4);  in msm_ringbuffer_new()
    [all …]

D | msm_ringbuffer.h
    31  void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
    36  OUT_RING(struct msm_ringbuffer *ring, uint32_t data)  in OUT_RING() argument
    38  if (ring->cur == ring->end)  in OUT_RING()
    39  ring->cur = ring->start;  in OUT_RING()
    40  *(ring->cur++) = data;  in OUT_RING()

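OUT_RING() above wraps the cursor before the store, so the write that would land one past the last slot goes to index 0 instead. A standalone model showing exactly that boundary case; msm's real ring lives in a GEM buffer rather than a local array:

```c
#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t *start, *end, *cur; };

/* Same shape as OUT_RING(): wrap-check first, then post-increment store. */
static void out_ring(struct ring *r, uint32_t data)
{
    if (r->cur == r->end)
        r->cur = r->start;
    *(r->cur++) = data;
}

int main(void)
{
    uint32_t buf[4] = { 0 };
    struct ring r = { buf, buf + 4, buf + 4 };  /* cursor parked at end */

    out_ring(&r, 0x11);                         /* wraps, writes buf[0] */
    printf("buf[0]=%#x, cur index=%ld\n", buf[0], (long)(r.cur - buf));
    return 0;
}
```
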
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/

D | xgbe-desc.c
    123  struct xgbe_ring *ring)  in xgbe_free_ring() argument
    128  if (!ring)  in xgbe_free_ring()
    131  if (ring->rdata) {  in xgbe_free_ring()
    132  for (i = 0; i < ring->rdesc_count; i++) {  in xgbe_free_ring()
    133  rdata = XGBE_GET_DESC_DATA(ring, i);  in xgbe_free_ring()
    137  kfree(ring->rdata);  in xgbe_free_ring()
    138  ring->rdata = NULL;  in xgbe_free_ring()
    141  if (ring->rx_hdr_pa.pages) {  in xgbe_free_ring()
    142  dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,  in xgbe_free_ring()
    143  ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);  in xgbe_free_ring()
    [all …]

D | xgbe-drv.c
    222  static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)  in xgbe_tx_avail_desc() argument
    224  return (ring->rdesc_count - (ring->cur - ring->dirty));  in xgbe_tx_avail_desc()
    227  static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)  in xgbe_rx_dirty_desc() argument
    229  return (ring->cur - ring->dirty);  in xgbe_rx_dirty_desc()
    233  struct xgbe_ring *ring, unsigned int count)  in xgbe_maybe_stop_tx_queue() argument
    237  if (count > xgbe_tx_avail_desc(ring)) {  in xgbe_maybe_stop_tx_queue()
    240  ring->tx.queue_stopped = 1;  in xgbe_maybe_stop_tx_queue()
    245  if (ring->tx.xmit_more)  in xgbe_maybe_stop_tx_queue()
    246  pdata->hw_if.tx_start_xmit(channel, ring);  in xgbe_maybe_stop_tx_queue()
    716  struct xgbe_ring *ring;  in xgbe_free_tx_data() local
    [all …]

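xgbe_tx_avail_desc() and xgbe_maybe_stop_tx_queue() above use free-running cur/dirty indices, so availability is capacity minus their wrap-safe difference. A sketch of the stop decision, with a plain flag in place of netif_tx_stop_queue():

```c
#include <stdio.h>

struct xring { unsigned rdesc_count, cur, dirty, stopped; };

static unsigned tx_avail_desc(const struct xring *r)
{
    return r->rdesc_count - (r->cur - r->dirty);   /* wrap-safe difference */
}

static int maybe_stop_tx_queue(struct xring *r, unsigned count)
{
    if (count > tx_avail_desc(r)) {
        r->stopped = 1;           /* netif_tx_stop_queue() in the driver */
        return -1;                /* caller returns NETDEV_TX_BUSY */
    }
    return 0;
}

int main(void)
{
    struct xring r = { .rdesc_count = 256, .cur = 300, .dirty = 60 };
    printf("avail=%u stop=%d stopped=%u\n",
           tx_avail_desc(&r), maybe_stop_tx_queue(&r, 32), r.stopped);
    return 0;
}
```
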
D | xgbe-dev.c
    1092  struct xgbe_ring *ring = channel->tx_ring;  in xgbe_tx_desc_init() local
    1095  int start_index = ring->cur;  in xgbe_tx_desc_init()
    1100  for (i = 0; i < ring->rdesc_count; i++) {  in xgbe_tx_desc_init()
    1101  rdata = XGBE_GET_DESC_DATA(ring, i);  in xgbe_tx_desc_init()
    1108  XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);  in xgbe_tx_desc_init()
    1111  rdata = XGBE_GET_DESC_DATA(ring, start_index);  in xgbe_tx_desc_init()
    1168  struct xgbe_ring *ring = channel->rx_ring;  in xgbe_rx_desc_init() local
    1170  unsigned int start_index = ring->cur;  in xgbe_rx_desc_init()
    1176  for (i = 0; i < ring->rdesc_count; i++) {  in xgbe_rx_desc_init()
    1177  rdata = XGBE_GET_DESC_DATA(ring, i);  in xgbe_rx_desc_init()
    [all …]

/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/

D | flowring.c
    112  struct brcmf_flowring_ring *ring;  in brcmf_flowring_create() local
    152  ring = kzalloc(sizeof(*ring), GFP_ATOMIC);  in brcmf_flowring_create()
    153  if (!ring)  in brcmf_flowring_create()
    161  ring->hash_id = hash_idx;  in brcmf_flowring_create()
    162  ring->status = RING_CLOSED;  in brcmf_flowring_create()
    163  skb_queue_head_init(&ring->skblist);  in brcmf_flowring_create()
    164  flow->rings[i] = ring;  in brcmf_flowring_create()
    174  struct brcmf_flowring_ring *ring;  in brcmf_flowring_tid() local
    176  ring = flow->rings[flowid];  in brcmf_flowring_tid()
    178  return flow->hash[ring->hash_id].fifo;  in brcmf_flowring_tid()
    [all …]

D | pcie.c
    867  struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;  in brcmf_pcie_ring_mb_write_rptr() local
    868  struct brcmf_pciedev_info *devinfo = ring->devinfo;  in brcmf_pcie_ring_mb_write_rptr()
    869  struct brcmf_commonring *commonring = &ring->commonring;  in brcmf_pcie_ring_mb_write_rptr()
    875  commonring->w_ptr, ring->id);  in brcmf_pcie_ring_mb_write_rptr()
    877  brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);  in brcmf_pcie_ring_mb_write_rptr()
    885  struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;  in brcmf_pcie_ring_mb_write_wptr() local
    886  struct brcmf_pciedev_info *devinfo = ring->devinfo;  in brcmf_pcie_ring_mb_write_wptr()
    887  struct brcmf_commonring *commonring = &ring->commonring;  in brcmf_pcie_ring_mb_write_wptr()
    893  commonring->r_ptr, ring->id);  in brcmf_pcie_ring_mb_write_wptr()
    895  brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);  in brcmf_pcie_ring_mb_write_wptr()
    [all …]

/linux-4.1.27/drivers/net/ethernet/broadcom/

D | bgmac.c
    49  static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)  in bgmac_dma_tx_reset() argument
    54  if (!ring->mmio_base)  in bgmac_dma_tx_reset()
    61  bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,  in bgmac_dma_tx_reset()
    64  val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);  in bgmac_dma_tx_reset()
    76  ring->mmio_base, val);  in bgmac_dma_tx_reset()
    79  bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);  in bgmac_dma_tx_reset()
    81  ring->mmio_base + BGMAC_DMA_TX_STATUS,  in bgmac_dma_tx_reset()
    85  ring->mmio_base);  in bgmac_dma_tx_reset()
    87  val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);  in bgmac_dma_tx_reset()
    90  ring->mmio_base);  in bgmac_dma_tx_reset()
    [all …]

D | bcmsysport.c | 665 struct bcm_sysport_tx_ring *ring) in __bcm_sysport_tx_reclaim() argument 674 txq = netdev_get_tx_queue(ndev, ring->index); in __bcm_sysport_tx_reclaim() 677 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); in __bcm_sysport_tx_reclaim() 679 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); in __bcm_sysport_tx_reclaim() 681 last_c_index = ring->c_index; in __bcm_sysport_tx_reclaim() 682 num_tx_cbs = ring->size; in __bcm_sysport_tx_reclaim() 693 ring->index, c_index, last_tx_cn, last_c_index); in __bcm_sysport_tx_reclaim() 696 cb = ring->cbs + last_c_index; in __bcm_sysport_tx_reclaim() 699 ring->desc_count++; in __bcm_sysport_tx_reclaim() 704 ring->c_index = c_index; in __bcm_sysport_tx_reclaim() [all …]
|
/linux-4.1.27/drivers/gpu/drm/qxl/ |
D | qxl_cmd.c | 33 struct ring { struct 39 struct ring *ring; argument 47 void qxl_ring_free(struct qxl_ring *ring) in qxl_ring_free() argument 49 kfree(ring); in qxl_ring_free() 52 void qxl_ring_init_hdr(struct qxl_ring *ring) in qxl_ring_init_hdr() argument 54 ring->ring->header.notify_on_prod = ring->n_elements; in qxl_ring_init_hdr() 65 struct qxl_ring *ring; in qxl_ring_create() local 67 ring = kmalloc(sizeof(*ring), GFP_KERNEL); in qxl_ring_create() 68 if (!ring) in qxl_ring_create() 71 ring->ring = (struct ring *)header; in qxl_ring_create() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_debugfs.c | 34 struct fm10k_ring *ring = s->private; in fm10k_dbg_desc_seq_start() local 36 return (*pos < ring->count) ? pos : NULL; in fm10k_dbg_desc_seq_start() 42 struct fm10k_ring *ring = s->private; in fm10k_dbg_desc_seq_next() local 44 return (++(*pos) < ring->count) ? pos : NULL; in fm10k_dbg_desc_seq_next() 63 struct fm10k_ring *ring = s->private; in fm10k_dbg_tx_desc_seq_show() local 75 if (!ring->desc) { in fm10k_dbg_tx_desc_seq_show() 78 struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i); in fm10k_dbg_tx_desc_seq_show() 90 struct fm10k_ring *ring = s->private; in fm10k_dbg_rx_desc_seq_show() local 102 if (!ring->desc) { in fm10k_dbg_rx_desc_seq_show() 105 union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i); in fm10k_dbg_rx_desc_seq_show() [all …]
|
D | fm10k.h | 71 #define check_for_tx_hang(ring) \ argument 72 test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) 73 #define set_check_for_tx_hang(ring) \ argument 74 set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) 75 #define clear_check_for_tx_hang(ring) \ argument 76 clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) 155 struct fm10k_ring *ring; /* pointer to linked list of rings */ member 170 static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) in txring_txq() argument 172 return &ring->netdev->_tx[ring->queue_index]; in txring_txq() 177 for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;) [all …]
|
D | fm10k_main.c | 358 static inline void fm10k_rx_checksum(struct fm10k_ring *ring, in fm10k_rx_checksum() argument 365 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in fm10k_rx_checksum() 374 ring->rx_stats.csum_err++; in fm10k_rx_checksum() 393 static inline void fm10k_rx_hash(struct fm10k_ring *ring, in fm10k_rx_hash() argument 399 if (!(ring->netdev->features & NETIF_F_RXHASH)) in fm10k_rx_hash() 1155 static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) in fm10k_get_tx_completed() argument 1157 return ring->stats.packets; in fm10k_get_tx_completed() 1160 static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) in fm10k_get_tx_pending() argument 1163 u32 head = ring->next_to_clean; in fm10k_get_tx_pending() 1164 u32 tail = ring->next_to_use; in fm10k_get_tx_pending() [all …]
|
D | fm10k_ethtool.c | 222 struct fm10k_ring *ring; in fm10k_get_ethtool_stats() local 225 ring = interface->tx_ring[i]; in fm10k_get_ethtool_stats() 226 if (ring) in fm10k_get_ethtool_stats() 227 queue_stat = (u64 *)&ring->stats; in fm10k_get_ethtool_stats() 229 *(data++) = ring ? queue_stat[j] : 0; in fm10k_get_ethtool_stats() 231 ring = interface->rx_ring[i]; in fm10k_get_ethtool_stats() 232 if (ring) in fm10k_get_ethtool_stats() 233 queue_stat = (u64 *)&ring->stats; in fm10k_get_ethtool_stats() 235 *(data++) = ring ? queue_stat[j] : 0; in fm10k_get_ethtool_stats() 454 struct ethtool_ringparam *ring) in fm10k_get_ringparam() argument [all …]
|
D | fm10k_pci.c | 508 struct fm10k_ring *ring) in fm10k_configure_tx_ring() argument 511 u64 tdba = ring->dma; in fm10k_configure_tx_ring() 512 u32 size = ring->count * sizeof(struct fm10k_tx_desc); in fm10k_configure_tx_ring() 515 u8 reg_idx = ring->reg_idx; in fm10k_configure_tx_ring() 533 ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; in fm10k_configure_tx_ring() 536 ring->next_to_clean = 0; in fm10k_configure_tx_ring() 537 ring->next_to_use = 0; in fm10k_configure_tx_ring() 540 if (ring->q_vector) { in fm10k_configure_tx_ring() 541 txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); in fm10k_configure_tx_ring() 563 struct fm10k_ring *ring) in fm10k_enable_tx_ring() argument [all …]
|
D | fm10k_netdev.c | 162 void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, in fm10k_unmap_and_free_tx_resource() argument 168 dma_unmap_single(ring->dev, in fm10k_unmap_and_free_tx_resource() 173 dma_unmap_page(ring->dev, in fm10k_unmap_and_free_tx_resource() 1106 struct fm10k_ring *ring; in fm10k_get_stats64() local 1113 ring = ACCESS_ONCE(interface->rx_ring[i]); in fm10k_get_stats64() 1115 if (!ring) in fm10k_get_stats64() 1119 start = u64_stats_fetch_begin_irq(&ring->syncp); in fm10k_get_stats64() 1120 packets = ring->stats.packets; in fm10k_get_stats64() 1121 bytes = ring->stats.bytes; in fm10k_get_stats64() 1122 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in fm10k_get_stats64() [all …]
|
/linux-4.1.27/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_hw.c | 25 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_init() argument 27 u32 *ring_cfg = ring->state; in xgene_enet_ring_init() 28 u64 addr = ring->dma; in xgene_enet_ring_init() 29 enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; in xgene_enet_ring_init() 45 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_type() argument 47 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_type() 51 is_bufpool = xgene_enet_is_bufpool(ring->id); in xgene_enet_ring_set_type() 62 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_recombbuf() argument 64 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_recombbuf() 72 static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, in xgene_enet_ring_wr32() argument [all …]
|
D | xgene_enet_main.c | 91 static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) in xgene_enet_dst_ring_num() argument 93 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); in xgene_enet_dst_ring_num() 95 return ((u16)pdata->rm << 10) | ring->num; in xgene_enet_dst_ring_num() 105 static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_len() argument 107 u32 __iomem *cmd_base = ring->cmd_base; in xgene_enet_ring_len() 362 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, in xgene_enet_process_ring() argument 365 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); in xgene_enet_process_ring() 367 u16 head = ring->head; in xgene_enet_process_ring() 368 u16 slots = ring->slots - 1; in xgene_enet_process_ring() 372 raw_desc = &ring->raw_desc[head]; in xgene_enet_process_ring() [all …]
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
D | mite.c | 184 struct mite_dma_descriptor_ring *ring = in mite_alloc_ring() local 187 if (!ring) in mite_alloc_ring() 189 ring->hw_dev = get_device(&mite->pcidev->dev); in mite_alloc_ring() 190 if (!ring->hw_dev) { in mite_alloc_ring() 191 kfree(ring); in mite_alloc_ring() 194 ring->n_links = 0; in mite_alloc_ring() 195 ring->descriptors = NULL; in mite_alloc_ring() 196 ring->descriptors_dma_addr = 0; in mite_alloc_ring() 197 return ring; in mite_alloc_ring() 201 void mite_free_ring(struct mite_dma_descriptor_ring *ring) in mite_free_ring() argument [all …]
|
D | mite.h | 49 struct mite_dma_descriptor_ring *ring; member 76 void mite_free_ring(struct mite_dma_descriptor_ring *ring); 80 *ring, unsigned min_channel, 86 *ring) in mite_request_channel() 88 return mite_request_channel_in_range(mite, ring, 0, in mite_request_channel() 111 int mite_buf_change(struct mite_dma_descriptor_ring *ring,
|
/linux-4.1.27/drivers/gpu/drm/r128/ |
D | r128_drv.h | 82 drm_r128_ring_buffer_t ring; member 418 drm_r128_ring_buffer_t *ring = &dev_priv->ring; in r128_update_ring_snapshot() local 419 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); in r128_update_ring_snapshot() 420 if (ring->space <= 0) in r128_update_ring_snapshot() 421 ring->space += ring->size; in r128_update_ring_snapshot() 438 drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ 439 if (ring->space < ring->high_mark) { \ 442 if (ring->space >= ring->high_mark) \ 477 int write, _nr; unsigned int tail_mask; volatile u32 *ring; 482 if (dev_priv->ring.space <= (n) * sizeof(u32)) { \ [all …]
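The r128 snapshot above (and i810_dma.c further down) uses the other classic idiom: bounded head/tail byte offsets whose signed difference can go negative and is fixed up by adding the ring size. A stand-alone model, with hypothetical names and an arbitrary size:

#include <assert.h>

#define RING_SIZE 65536              /* bytes, arbitrary for the example */

static int ring_space(int head, int tail)   /* byte offsets into the ring */
{
        int space = head - tail;
        if (space <= 0)              /* head == tail reads as "all free" here */
                space += RING_SIZE;
        return space;
}

int main(void)
{
        assert(ring_space(0, 0) == RING_SIZE);              /* drained ring */
        assert(ring_space(1024, 4096) == RING_SIZE - 3072);
        assert(ring_space(4096, 1024) == 3072);
        return 0;
}

The `space <= (n) * sizeof(u32)` comparison in the BEGIN_RING excerpt then appears to keep at least one word free, so a completely full ring is never mistaken for an empty one.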
|
D | r128_cce.c | 206 if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) { in r128_do_cce_idle() 232 dev_priv->cce_mode | dev_priv->ring.size_l2qw in r128_do_cce_start() 248 dev_priv->ring.tail = 0; in r128_do_cce_reset() 539 dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; in r128_do_init_cce() 540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle in r128_do_init_cce() 542 dev_priv->ring.size = init->ring_size; in r128_do_init_cce() 543 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); in r128_do_init_cce() 545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; in r128_do_init_cce() 547 dev_priv->ring.high_mark = 128; in r128_do_init_cce() 866 drm_r128_ring_buffer_t *ring = &dev_priv->ring; in r128_wait_ring() local [all …]
|
/linux-4.1.27/drivers/scsi/fnic/ |
D | vnic_wq_copy.h | 31 struct vnic_dev_ring ring; member 38 return wq->ring.desc_avail; in vnic_wq_copy_desc_avail() 43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; in vnic_wq_copy_desc_in_use() 48 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc() 55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? in vnic_wq_copy_post() 57 wq->ring.desc_avail--; in vnic_wq_copy_post() 76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; in vnic_wq_copy_desc_process() 78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count); in vnic_wq_copy_desc_process() 79 wq->ring.desc_avail += cnt; in vnic_wq_copy_desc_process() 88 struct fcpio_host_req *wq_desc = wq->ring.descs; in vnic_wq_copy_service() [all …]
|
D | vnic_dev.c | 160 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, in vnic_dev_desc_ring_size() argument 173 ring->base_align = 512; in vnic_dev_desc_ring_size() 178 ring->desc_count = ALIGN(desc_count, count_align); in vnic_dev_desc_ring_size() 180 ring->desc_size = ALIGN(desc_size, desc_align); in vnic_dev_desc_ring_size() 182 ring->size = ring->desc_count * ring->desc_size; in vnic_dev_desc_ring_size() 183 ring->size_unaligned = ring->size + ring->base_align; in vnic_dev_desc_ring_size() 185 return ring->size_unaligned; in vnic_dev_desc_ring_size() 188 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) in vnic_dev_clear_desc_ring() argument 190 memset(ring->descs, 0, ring->size); in vnic_dev_clear_desc_ring() 193 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, in vnic_dev_alloc_desc_ring() argument [all …]
|
D | vnic_wq.c | 31 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 48 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 49 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 74 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_free() 101 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_alloc() 120 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_init() 122 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); in vnic_wq_init() 173 wq->ring.desc_avail++; in vnic_wq_clean() 182 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_clean()
|
D | vnic_cq_copy.h | 35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service() 36 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service() 45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service() 50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service() 51 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
|
D | vnic_rq.c | 31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs() 48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs() 49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs() 75 vnic_dev_free_desc_ring(vdev, &rq->ring); in vnic_rq_free() 101 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); in vnic_rq_alloc() 121 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_rq_init() 123 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); in vnic_rq_init() 183 rq->ring.desc_avail++; in vnic_rq_clean() 195 vnic_dev_clear_desc_ring(&rq->ring); in vnic_rq_clean()
|
D | vnic_cq.h | 65 struct vnic_dev_ring ring; member 81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 82 cq->ring.desc_size * cq->to_clean); in vnic_cq_service() 93 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service() 98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 99 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
|
D | vnic_cq.c | 26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free() 45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc() 60 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_cq_init() 62 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init() 84 vnic_dev_clear_desc_ring(&cq->ring); in vnic_cq_clean()
|
D | vnic_wq.h | 89 struct vnic_dev_ring ring; member 99 return wq->ring.desc_avail; in vnic_wq_desc_avail() 105 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 136 wq->ring.desc_avail--; in vnic_wq_post() 152 wq->ring.desc_avail++; in vnic_wq_service()
|
D | vnic_rq.h | 96 struct vnic_dev_ring ring; member 108 return rq->ring.desc_avail; in vnic_rq_desc_avail() 114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used() 145 rq->ring.desc_avail--; in vnic_rq_post() 172 rq->ring.desc_avail += count; in vnic_rq_return_descs() 197 rq->ring.desc_avail++; in vnic_rq_service()
|
D | vnic_wq_copy.c | 66 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_copy_clean() 74 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_copy_free() 95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_copy_alloc() 108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_copy_init() 110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); in vnic_wq_copy_init()
|
D | vnic_dev.h | 116 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, 119 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); 120 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, 123 struct vnic_dev_ring *ring);
|
/linux-4.1.27/drivers/net/ethernet/broadcom/genet/ |
D | bcmgenet.c | 350 unsigned int ring, in bcmgenet_tdma_ring_readl() argument 354 (DMA_RING_SIZE * ring) + in bcmgenet_tdma_ring_readl() 359 unsigned int ring, u32 val, in bcmgenet_tdma_ring_writel() argument 363 (DMA_RING_SIZE * ring) + in bcmgenet_tdma_ring_writel() 368 unsigned int ring, in bcmgenet_rdma_ring_readl() argument 372 (DMA_RING_SIZE * ring) + in bcmgenet_rdma_ring_readl() 377 unsigned int ring, u32 val, in bcmgenet_rdma_ring_writel() argument 381 (DMA_RING_SIZE * ring) + in bcmgenet_rdma_ring_writel() 943 struct bcmgenet_tx_ring *ring) in bcmgenet_get_txcb() argument 947 tx_cb_ptr = ring->cbs; in bcmgenet_get_txcb() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.h | 195 #define ring_is_ps_enabled(ring) \ argument 196 test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 197 #define set_ring_ps_enabled(ring) \ argument 198 set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 199 #define clear_ring_ps_enabled(ring) \ argument 200 clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 201 #define check_for_tx_hang(ring) \ argument 202 test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 203 #define set_check_for_tx_hang(ring) \ argument 204 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) [all …]
|
D | i40evf_ethtool.c | 225 struct ethtool_ringparam *ring) in i40evf_get_ringparam() argument 229 ring->rx_max_pending = I40EVF_MAX_RXD; in i40evf_get_ringparam() 230 ring->tx_max_pending = I40EVF_MAX_TXD; in i40evf_get_ringparam() 231 ring->rx_pending = adapter->rx_desc_count; in i40evf_get_ringparam() 232 ring->tx_pending = adapter->tx_desc_count; in i40evf_get_ringparam() 244 struct ethtool_ringparam *ring) in i40evf_set_ringparam() argument 249 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in i40evf_set_ringparam() 252 new_tx_count = clamp_t(u32, ring->tx_pending, in i40evf_set_ringparam() 257 new_rx_count = clamp_t(u32, ring->rx_pending, in i40evf_set_ringparam()
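The get/set_ringparam handlers indexed here (i40evf above, qlcnic and netxen below) all fill the same `struct ethtool_ringparam` that userspace reaches through the SIOCETHTOOL ioctl. A hedged sketch of the querying side, roughly equivalent to `ethtool -g eth0`; the device name is only an example:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* example device */
        ifr.ifr_data = (void *)&ring;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("rx %u/%u tx %u/%u\n",
                       ring.rx_pending, ring.rx_max_pending,
                       ring.tx_pending, ring.tx_max_pending);
        return 0;
}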
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.h | 196 #define ring_is_ps_enabled(ring) \ argument 197 test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 198 #define set_ring_ps_enabled(ring) \ argument 199 set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 200 #define clear_ring_ps_enabled(ring) \ argument 201 clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) 202 #define check_for_tx_hang(ring) \ argument 203 test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 204 #define set_check_for_tx_hang(ring) \ argument 205 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) [all …]
|
/linux-4.1.27/drivers/net/vmxnet3/ |
D | vmxnet3_int.h | 134 vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_adv_next2fill() argument 136 ring->next2fill++; in vmxnet3_cmd_ring_adv_next2fill() 137 if (unlikely(ring->next2fill == ring->size)) { in vmxnet3_cmd_ring_adv_next2fill() 138 ring->next2fill = 0; in vmxnet3_cmd_ring_adv_next2fill() 139 VMXNET3_FLIP_RING_GEN(ring->gen); in vmxnet3_cmd_ring_adv_next2fill() 144 vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_adv_next2comp() argument 146 VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); in vmxnet3_cmd_ring_adv_next2comp() 150 vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_desc_avail() argument 152 return (ring->next2comp > ring->next2fill ? 0 : ring->size) + in vmxnet3_cmd_ring_desc_avail() 153 ring->next2comp - ring->next2fill - 1; in vmxnet3_cmd_ring_desc_avail() [all …]
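vmxnet3's `next2fill`/`next2comp` pair stays bounded inside the ring, so its availability formula compensates explicitly when the fill pointer has wrapped ahead of the completion pointer, and it reserves one slot to distinguish full from empty. A stand-alone check of that formula (illustrative ring size, not the driver's):

#include <assert.h>

#define RING_SZ 256

static int desc_avail(int next2fill, int next2comp)
{
        return (next2comp > next2fill ? 0 : RING_SZ)
               + next2comp - next2fill - 1;
}

int main(void)
{
        assert(desc_avail(0, 0) == RING_SZ - 1);  /* empty: one slot reserved */
        assert(desc_avail(10, 11) == 0);          /* completion just behind: full */
        assert(desc_avail(200, 50) == RING_SZ - 151); /* wrapped fill pointer */
        return 0;
}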
|
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/ |
D | qlcnic_io.c | 1136 struct qlcnic_host_rds_ring *ring, in qlcnic_process_rxbuf() argument 1142 buffer = &ring->rx_buf_arr[index]; in qlcnic_process_rxbuf() 1148 pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, in qlcnic_process_rxbuf() 1192 struct qlcnic_host_sds_ring *sds_ring, int ring, in qlcnic_process_rcv() argument 1203 if (unlikely(ring >= adapter->max_rds_rings)) in qlcnic_process_rcv() 1206 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_process_rcv() 1260 int ring, u64 sts_data0, u64 sts_data1) in qlcnic_process_lro() argument 1275 if (unlikely(ring >= adapter->max_rds_rings)) in qlcnic_process_lro() 1278 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_process_lro() 1363 u8 ring; in qlcnic_process_rcv_ring() local [all …]
|
D | qlcnic_ctx.c | 417 int ring) in qlcnic_82xx_fw_cmd_create_tx_ctx() argument 467 index = temp_nsds_rings + ring; in qlcnic_82xx_fw_cmd_create_tx_ctx() 503 index = adapter->drv_sds_rings + ring; in qlcnic_82xx_fw_cmd_create_tx_ctx() 562 int err, ring; in qlcnic_alloc_hw_resources() local 573 for (ring = 0; ring < adapter->drv_tx_rings; ring++) { in qlcnic_alloc_hw_resources() 574 tx_ring = &adapter->tx_ring[ring]; in qlcnic_alloc_hw_resources() 594 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_alloc_hw_resources() 595 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_alloc_hw_resources() 607 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { in qlcnic_alloc_hw_resources() 608 sds_ring = &recv_ctx->sds_rings[ring]; in qlcnic_alloc_hw_resources() [all …]
|
D | qlcnic_main.c | 1713 int err, ring, num_sds_rings; in qlcnic_request_irq() local 1747 for (ring = 0; ring < num_sds_rings; ring++) { in qlcnic_request_irq() 1748 sds_ring = &recv_ctx->sds_rings[ring]; in qlcnic_request_irq() 1751 (ring == (num_sds_rings - 1))) { in qlcnic_request_irq() 1761 netdev->name, ring); in qlcnic_request_irq() 1766 netdev->name, ring); in qlcnic_request_irq() 1780 for (ring = 0; ring < adapter->drv_tx_rings; in qlcnic_request_irq() 1781 ring++) { in qlcnic_request_irq() 1782 tx_ring = &adapter->tx_ring[ring]; in qlcnic_request_irq() 1784 "%s-tx-%d", netdev->name, ring); in qlcnic_request_irq() [all …]
|
D | qlcnic_init.c | 88 int i, ring; in qlcnic_release_rx_buffers() local 91 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_release_rx_buffers() 92 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_release_rx_buffers() 113 int i, ring; in qlcnic_reset_rx_buffers_list() local 116 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_reset_rx_buffers_list() 117 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_reset_rx_buffers_list() 170 int ring; in qlcnic_free_sw_resources() local 177 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_free_sw_resources() 178 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_free_sw_resources() 191 int ring, i; in qlcnic_alloc_sw_resources() local [all …]
|
D | qlcnic_ethtool.c | 527 int ring, i = 0; in qlcnic_get_regs() local 552 for (ring = 0; ring < adapter->drv_tx_rings; ring++) { in qlcnic_get_regs() 553 tx_ring = &adapter->tx_ring[ring]; in qlcnic_get_regs() 565 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_get_regs() 566 rds_rings = &recv_ctx->rds_rings[ring]; in qlcnic_get_regs() 572 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { in qlcnic_get_regs() 573 sds_ring = &(recv_ctx->sds_rings[ring]); in qlcnic_get_regs() 625 struct ethtool_ringparam *ring) in qlcnic_get_ringparam() argument 629 ring->rx_pending = adapter->num_rxd; in qlcnic_get_ringparam() 630 ring->rx_jumbo_pending = adapter->num_jumbo_rxd; in qlcnic_get_ringparam() [all …]
|
/linux-4.1.27/drivers/dma/ |
D | xgene-dma.c | 419 static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) in xgene_dma_ring_desc_cnt() argument 421 u32 __iomem *cmd_base = ring->cmd_base; in xgene_dma_ring_desc_cnt() 695 static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, in xgene_chan_xfer_request() argument 701 if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) in xgene_chan_xfer_request() 705 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request() 711 if (++ring->head == ring->slots) in xgene_chan_xfer_request() 712 ring->head = 0; in xgene_chan_xfer_request() 722 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request() 724 if (++ring->head == ring->slots) in xgene_chan_xfer_request() 725 ring->head = 0; in xgene_chan_xfer_request() [all …]
|
/linux-4.1.27/virt/kvm/ |
D | coalesced_mmio.c | 44 struct kvm_coalesced_mmio_ring *ring; in coalesced_mmio_has_room() local 53 ring = dev->kvm->coalesced_mmio_ring; in coalesced_mmio_has_room() 54 avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; in coalesced_mmio_has_room() 68 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; in coalesced_mmio_write() local 82 ring->coalesced_mmio[ring->last].phys_addr = addr; in coalesced_mmio_write() 83 ring->coalesced_mmio[ring->last].len = len; in coalesced_mmio_write() 84 memcpy(ring->coalesced_mmio[ring->last].data, val, len); in coalesced_mmio_write() 86 ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; in coalesced_mmio_write()
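coalesced_mmio_has_room() above computes free space as `(first - last - 1) % KVM_COALESCED_MMIO_MAX`, where `first` is the consumer index and `last` the producer. The real MAX is derived from the page size; the model below assumes a power-of-two size so the unsigned modulo stays exact:

#include <assert.h>

#define RING_MAX 64u                 /* power of two keeps the modulo exact */

static unsigned int room(unsigned int first, unsigned int last)
{
        return (first - last - 1) % RING_MAX;
}

int main(void)
{
        assert(room(0, 0) == RING_MAX - 1); /* empty; one slot always unused */
        assert(room(5, 4) == 0);            /* producer one slot behind: full */
        assert(room(10, 20) == RING_MAX - 11);
        return 0;
}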
|
/linux-4.1.27/Documentation/networking/ |
D | netlink_mmap.txt | 14 Memory mapped netlink I/O uses two circular ring buffers for RX and TX which 17 The RX ring is used by the kernel to directly construct netlink messages into 19 additionally as long as the ring contains messages no recvmsg() or poll() 22 The TX ring is used to process messages directly from user-space memory, the 23 kernel processes all messages contained in the ring using a single sendmsg() 31 - ring setup 32 - conversion of the RX path to get messages from the ring instead of recvmsg() 33 - conversion of the TX path to construct messages into the ring 35 Ring setup is done using setsockopt() to provide the ring parameters to the 36 kernel, then a call to mmap() to map the ring into the process's address space: [all …]
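The setup sequence the text describes, setsockopt() for both ring parameter sets followed by a single mmap() covering RX then TX, looks roughly like the sketch below. This is a hedged reconstruction against the 4.1-era `struct nl_mmap_req` API (the feature was removed from later kernels); block and frame sizes are arbitrary examples:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
        struct nl_mmap_req req = {
                .nm_block_size  = 4096,
                .nm_block_nr    = 64,
                .nm_frame_size  = 2048,
                .nm_frame_nr    = 64 * 4096 / 2048,
        };
        unsigned int ring_size = req.nm_block_nr * req.nm_block_size;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
        void *rx_ring;

        setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
        setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));

        /* one mapping covers both rings; the RX ring comes first */
        rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (rx_ring == MAP_FAILED)
                perror("mmap");
        close(fd);
        return 0;
}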
|
D | packet_mmap.txt | 75 setsockopt() ---> allocation of the circular buffer (ring) 103 allocated RX and TX buffer ring with a single mmap() call. 104 See "Mapping and use of the circular buffer (ring)". 116 setsockopt() ---> allocation of the circular buffer (ring) 124 the ring 215 circular buffer (ring) of unswappable memory. 252 buffer (ring)". 406 + Mapping and use of the circular buffer (ring) 423 RX and TX buffer ring has to be done with one call to mmap: 432 RX must be the first as the kernel maps the TX ring memory right [all …]
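The AF_PACKET variant of the same pattern is the long-lived one: size the ring with PACKET_RX_RING, then map the whole buffer with one mmap(). A minimal sketch (needs CAP_NET_RAW; the sizes are examples):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <arpa/inet.h>          /* htons() */
#include <linux/if_packet.h>
#include <linux/if_ether.h>     /* ETH_P_ALL */

int main(void)
{
        struct tpacket_req req = {
                .tp_block_size  = 4096,
                .tp_block_nr    = 64,
                .tp_frame_size  = 2048,
                .tp_frame_nr    = 64 * 4096 / 2048,
        };
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        void *ring;

        setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
        ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ring == MAP_FAILED)
                perror("mmap");
        return 0;
}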
|
D | spider_net.txt | 18 The receive (RX) ring is a circular linked list of RX descriptors, 19 together with three pointers into the ring that are used to manage its 22 The elements of the ring are called "descriptors" or "descrs"; they 36 ring is handed off to the hardware, which sequentially fills in the 74 other descrs in the ring should be "empty" as well. 78 of the ring, starting at the tail pointer, and listing the status 116 the OS fails to empty the RX ring fast enough, the hardware GDACTDPA 126 When the OS finally has a chance to run, it will empty out the RX ring. 174 search the ring for the next full descr, and the driver will resume 175 operations there. Since this will leave "holes" in the ring, there [all …]
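The draining walk the spider_net text describes, start at the tail pointer and hand off descriptors until reaching one the hardware still owns, reduces to a few lines. A toy model with illustrative states only; the real driver decodes them from each descriptor's status word:

#include <assert.h>

#define RING 8
enum state { EMPTY, FILLED };   /* FILLED = hardware wrote a frame here */

static int drain(enum state d[], int tail)
{
        while (d[tail] == FILLED) {     /* hand the frame to the stack */
                d[tail] = EMPTY;        /* descr goes back to "empty" */
                tail = (tail + 1) % RING;
        }
        return tail;            /* first descr still owned by the hardware */
}

int main(void)
{
        enum state d[RING] = { FILLED, FILLED, FILLED, EMPTY };

        assert(drain(d, 0) == 3);
        return 0;
}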
|
/linux-4.1.27/drivers/dma/ioat/ |
D | dma_v2.c | 475 struct ioat_ring_ent **ring; in ioat2_alloc_ring() local 483 ring = kcalloc(descs, sizeof(*ring), flags); in ioat2_alloc_ring() 484 if (!ring) in ioat2_alloc_ring() 487 ring[i] = ioat2_alloc_ring_ent(c, flags); in ioat2_alloc_ring() 488 if (!ring[i]) { in ioat2_alloc_ring() 490 ioat2_free_ring_ent(ring[i], c); in ioat2_alloc_ring() 491 kfree(ring); in ioat2_alloc_ring() 494 set_desc_id(ring[i], i); in ioat2_alloc_ring() 499 struct ioat_ring_ent *next = ring[i+1]; in ioat2_alloc_ring() 500 struct ioat_dma_descriptor *hw = ring[i]->hw; in ioat2_alloc_ring() [all …]
|
/linux-4.1.27/drivers/gpu/drm/msm/adreno/ |
D | adreno_gpu.c | 87 static uint32_t get_wptr(struct msm_ringbuffer *ring) in get_wptr() argument 89 return ring->cur - ring->start; in get_wptr() 127 struct msm_ringbuffer *ring = gpu->rb; in adreno_submit() local 140 OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); in adreno_submit() 141 OUT_RING(ring, submit->cmd[i].iova); in adreno_submit() 142 OUT_RING(ring, submit->cmd[i].size); in adreno_submit() 153 OUT_PKT2(ring); in adreno_submit() 155 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); in adreno_submit() 156 OUT_RING(ring, submit->fence); in adreno_submit() 163 OUT_PKT3(ring, CP_EVENT_WRITE, 1); in adreno_submit() [all …]
|
D | a4xx_gpu.c | 112 struct msm_ringbuffer *ring = gpu->rb; in a4xx_me_init() local 114 OUT_PKT3(ring, CP_ME_INIT, 17); in a4xx_me_init() 115 OUT_RING(ring, 0x000003f7); in a4xx_me_init() 116 OUT_RING(ring, 0x00000000); in a4xx_me_init() 117 OUT_RING(ring, 0x00000000); in a4xx_me_init() 118 OUT_RING(ring, 0x00000000); in a4xx_me_init() 119 OUT_RING(ring, 0x00000080); in a4xx_me_init() 120 OUT_RING(ring, 0x00000100); in a4xx_me_init() 121 OUT_RING(ring, 0x00000180); in a4xx_me_init() 122 OUT_RING(ring, 0x00006600); in a4xx_me_init() [all …]
|
D | a3xx_gpu.c | 46 struct msm_ringbuffer *ring = gpu->rb; in a3xx_me_init() local 48 OUT_PKT3(ring, CP_ME_INIT, 17); in a3xx_me_init() 49 OUT_RING(ring, 0x000003f7); in a3xx_me_init() 50 OUT_RING(ring, 0x00000000); in a3xx_me_init() 51 OUT_RING(ring, 0x00000000); in a3xx_me_init() 52 OUT_RING(ring, 0x00000000); in a3xx_me_init() 53 OUT_RING(ring, 0x00000080); in a3xx_me_init() 54 OUT_RING(ring, 0x00000100); in a3xx_me_init() 55 OUT_RING(ring, 0x00000180); in a3xx_me_init() 56 OUT_RING(ring, 0x00006600); in a3xx_me_init() [all …]
|
D | adreno_gpu.h | 247 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) in OUT_PKT0() argument 249 adreno_wait_ring(ring->gpu, cnt+1); in OUT_PKT0() 250 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); in OUT_PKT0() 255 OUT_PKT2(struct msm_ringbuffer *ring) in OUT_PKT2() argument 257 adreno_wait_ring(ring->gpu, 1); in OUT_PKT2() 258 OUT_RING(ring, CP_TYPE2_PKT); in OUT_PKT2() 262 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) in OUT_PKT3() argument 264 adreno_wait_ring(ring->gpu, cnt+1); in OUT_PKT3() 265 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); in OUT_PKT3()
|
/linux-4.1.27/drivers/net/ethernet/qlogic/netxen/ |
D | netxen_nic_ctx.c | 707 int ring; in netxen_init_old_ctx() local 720 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_init_old_ctx() 721 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_init_old_ctx() 723 hwctx->rcv_rings[ring].addr = in netxen_init_old_ctx() 725 hwctx->rcv_rings[ring].size = in netxen_init_old_ctx() 729 for (ring = 0; ring < adapter->max_sds_rings; ring++) { in netxen_init_old_ctx() 730 sds_ring = &recv_ctx->sds_rings[ring]; in netxen_init_old_ctx() 732 if (ring == 0) { in netxen_init_old_ctx() 736 hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); in netxen_init_old_ctx() 737 hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx() [all …]
|
D | netxen_nic_init.c | 112 int i, ring; in netxen_release_rx_buffers() local 115 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_release_rx_buffers() 116 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_release_rx_buffers() 170 int ring; in netxen_free_sw_resources() local 177 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_free_sw_resources() 178 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_free_sw_resources() 201 int ring, i; in netxen_alloc_sw_resources() local 230 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_alloc_sw_resources() 231 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_alloc_sw_resources() 232 switch (ring) { in netxen_alloc_sw_resources() [all …]
|
D | netxen_nic_ethtool.c | 300 int ring, i = 0; in netxen_nic_get_regs() local 356 for (ring = 0; ring < adapter->max_sds_rings; ring++) { in netxen_nic_get_regs() 357 sds_ring = &(recv_ctx->sds_rings[ring]); in netxen_nic_get_regs() 405 struct ethtool_ringparam *ring) in netxen_nic_get_ringparam() argument 409 ring->rx_pending = adapter->num_rxd; in netxen_nic_get_ringparam() 410 ring->rx_jumbo_pending = adapter->num_jumbo_rxd; in netxen_nic_get_ringparam() 411 ring->rx_jumbo_pending += adapter->num_lro_rxd; in netxen_nic_get_ringparam() 412 ring->tx_pending = adapter->num_txd; in netxen_nic_get_ringparam() 415 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; in netxen_nic_get_ringparam() 416 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; in netxen_nic_get_ringparam() [all …]
|
/linux-4.1.27/drivers/net/ethernet/cisco/enic/ |
D | vnic_rq.c | 33 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs() 46 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs() 47 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs() 72 vnic_dev_free_desc_ring(vdev, &rq->ring); in vnic_rq_free() 100 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); in vnic_rq_alloc() 119 unsigned int count = rq->ring.desc_count; in vnic_rq_init_start() 121 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_rq_init_start() 190 unsigned int count = rq->ring.desc_count; in vnic_rq_clean() 195 for (i = 0; i < rq->ring.desc_count; i++) { in vnic_rq_clean() 199 rq->ring.desc_avail = rq->ring.desc_count - 1; in vnic_rq_clean() [all …]
|
D | vnic_dev.c | 199 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, in vnic_dev_desc_ring_size() argument 211 ring->base_align = 512; in vnic_dev_desc_ring_size() 216 ring->desc_count = ALIGN(desc_count, count_align); in vnic_dev_desc_ring_size() 218 ring->desc_size = ALIGN(desc_size, desc_align); in vnic_dev_desc_ring_size() 220 ring->size = ring->desc_count * ring->desc_size; in vnic_dev_desc_ring_size() 221 ring->size_unaligned = ring->size + ring->base_align; in vnic_dev_desc_ring_size() 223 return ring->size_unaligned; in vnic_dev_desc_ring_size() 226 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) in vnic_dev_clear_desc_ring() argument 228 memset(ring->descs, 0, ring->size); in vnic_dev_clear_desc_ring() 231 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, in vnic_dev_alloc_desc_ring() argument [all …]
|
D | vnic_wq.c | 33 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 46 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 47 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 75 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_free() 103 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_alloc() 122 unsigned int count = wq->ring.desc_count; in vnic_wq_init_start() 124 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_init_start() 188 wq->ring.desc_avail++; in vnic_wq_clean() 197 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_clean()
|
D | vnic_cq.h | 62 struct vnic_dev_ring ring; member 83 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 84 cq->ring.desc_size * cq->to_clean); in vnic_cq_service() 95 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service() 100 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 101 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
|
D | vnic_cq.c | 30 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free() 49 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc() 64 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_cq_init() 66 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init() 90 vnic_dev_clear_desc_ring(&cq->ring); in vnic_cq_clean()
|
D | vnic_wq.h | 84 struct vnic_dev_ring ring; member 94 return wq->ring.desc_avail; in vnic_wq_desc_avail() 100 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 139 wq->ring.desc_avail -= desc_skip_cnt; in vnic_wq_post() 155 wq->ring.desc_avail++; in vnic_wq_service()
|
D | vnic_rq.h | 82 struct vnic_dev_ring ring; member 108 return rq->ring.desc_avail; in vnic_rq_desc_avail() 114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used() 142 rq->ring.desc_avail--; in vnic_rq_post() 164 rq->ring.desc_avail += count; in vnic_rq_return_descs() 189 rq->ring.desc_avail++; in vnic_rq_service()
|
/linux-4.1.27/drivers/usb/host/ |
D | xhci-mem.c | 128 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, in xhci_link_rings() argument 134 if (!ring || !first || !last) in xhci_link_rings() 137 next = ring->enq_seg->next; in xhci_link_rings() 138 xhci_link_segments(xhci, ring->enq_seg, first, ring->type); in xhci_link_rings() 139 xhci_link_segments(xhci, last, next, ring->type); in xhci_link_rings() 140 ring->num_segs += num_segs; in xhci_link_rings() 141 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; in xhci_link_rings() 143 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { in xhci_link_rings() 144 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control in xhci_link_rings() 148 ring->last_seg = last; in xhci_link_rings() [all …]
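xhci_link_rings() above grows a ring by splicing fixed-size segments into a cycle, and the walkers in xhci-ring.c below hop to `seg->next` whenever they fall off the end of a segment. A toy model of that traversal (hypothetical types; the real ring additionally tracks cycle bits and link TRBs):

#include <assert.h>

#define TRBS_PER_SEG 4

struct seg {
        int trbs[TRBS_PER_SEG];
        struct seg *next;
};

static void advance(struct seg **seg, int *idx)
{
        if (++*idx == TRBS_PER_SEG) {   /* last slot: follow the link */
                *idx = 0;
                *seg = (*seg)->next;
        }
}

int main(void)
{
        struct seg a = { .trbs = {0} }, b = { .trbs = {0} };
        struct seg *cur = &a;
        int i, idx = 0;

        a.next = &b;
        b.next = &a;            /* two segments closed into a ring */

        for (i = 0; i < TRBS_PER_SEG; i++)
                advance(&cur, &idx);
        assert(cur == &b && idx == 0);  /* hopped across the link */
        for (i = 0; i < TRBS_PER_SEG; i++)
                advance(&cur, &idx);
        assert(cur == &a);              /* and back around */
        return 0;
}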
|
D | u132-hcd.c | 154 struct u132_ring *ring; member 189 struct u132_ring ring[MAX_U132_RINGS]; member 304 static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring) in u132_ring_put_kref() argument 309 static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_requeue_work() argument 313 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) in u132_ring_requeue_work() 315 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) in u132_ring_requeue_work() 320 static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_queue_work() argument 324 u132_ring_requeue_work(u132, ring, delta); in u132_ring_queue_work() 327 static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring) in u132_ring_cancel_work() argument 329 if (cancel_delayed_work(&ring->scheduler)) in u132_ring_cancel_work() [all …]
|
D | xhci-dbg.c | 329 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_dbg_ring_ptrs() argument 332 ring->dequeue, in xhci_dbg_ring_ptrs() 333 (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg, in xhci_dbg_ring_ptrs() 334 ring->dequeue)); in xhci_dbg_ring_ptrs() 336 ring->deq_updates); in xhci_dbg_ring_ptrs() 338 ring->enqueue, in xhci_dbg_ring_ptrs() 339 (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg, in xhci_dbg_ring_ptrs() 340 ring->enqueue)); in xhci_dbg_ring_ptrs() 342 ring->enq_updates); in xhci_dbg_ring_ptrs() 354 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_debug_ring() argument [all …]
|
D | xhci-ring.c | 93 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb_on_last_seg() argument 96 if (ring == xhci->event_ring) in last_trb_on_last_seg() 107 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb() argument 110 if (ring == xhci->event_ring) in last_trb() 116 static int enqueue_is_link_trb(struct xhci_ring *ring) in enqueue_is_link_trb() argument 118 struct xhci_link_trb *link = &ring->enqueue->link; in enqueue_is_link_trb() 127 struct xhci_ring *ring, in next_trb() argument 131 if (last_trb(xhci, ring, *seg, *trb)) { in next_trb() 143 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) in inc_deq() argument 145 ring->deq_updates++; in inc_deq() [all …]
|
/linux-4.1.27/drivers/net/irda/ |
D | donauboe.c | 272 if (self->ring) in toshoboe_dumpregs() 275 ringbase = virt_to_bus (self->ring); in toshoboe_dumpregs() 279 printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control); in toshoboe_dumpregs() 283 printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control); in toshoboe_dumpregs() 490 self->ring->tx[i].len = 0; in toshoboe_initring() 491 self->ring->tx[i].control = 0x00; in toshoboe_initring() 492 self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]); in toshoboe_initring() 497 self->ring->rx[i].len = RX_LEN; in toshoboe_initring() 498 self->ring->rx[i].len = 0; in toshoboe_initring() 499 self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]); in toshoboe_initring() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe.h | 238 #define check_for_tx_hang(ring) \ argument 239 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) 240 #define set_check_for_tx_hang(ring) \ argument 241 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) 242 #define clear_check_for_tx_hang(ring) \ argument 243 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) 244 #define ring_is_rsc_enabled(ring) \ argument 245 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) 246 #define set_ring_rsc_enabled(ring) \ argument 247 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) [all …]
|
D | ixgbe_lib.c | 780 static void ixgbe_add_ring(struct ixgbe_ring *ring, in ixgbe_add_ring() argument 783 ring->next = head->ring; in ixgbe_add_ring() 784 head->ring = ring; in ixgbe_add_ring() 806 struct ixgbe_ring *ring; in ixgbe_alloc_q_vector() local 863 ring = q_vector->ring; in ixgbe_alloc_q_vector() 882 ring->dev = &adapter->pdev->dev; in ixgbe_alloc_q_vector() 883 ring->netdev = adapter->netdev; in ixgbe_alloc_q_vector() 886 ring->q_vector = q_vector; in ixgbe_alloc_q_vector() 889 ixgbe_add_ring(ring, &q_vector->tx); in ixgbe_alloc_q_vector() 892 ring->count = adapter->tx_ring_count; in ixgbe_alloc_q_vector() [all …]
|
D | ixgbe_main.c | 904 void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, in ixgbe_unmap_and_free_tx_resource() argument 910 dma_unmap_single(ring->dev, in ixgbe_unmap_and_free_tx_resource() 915 dma_unmap_page(ring->dev, in ixgbe_unmap_and_free_tx_resource() 999 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) in ixgbe_get_tx_completed() argument 1001 return ring->stats.packets; in ixgbe_get_tx_completed() 1004 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) in ixgbe_get_tx_pending() argument 1010 if (ring->l2_accel_priv) in ixgbe_get_tx_pending() 1011 adapter = ring->l2_accel_priv->real_adapter; in ixgbe_get_tx_pending() 1013 adapter = netdev_priv(ring->netdev); in ixgbe_get_tx_pending() 1016 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); in ixgbe_get_tx_pending() [all …]
|
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/ |
D | vxge-traffic.c | 243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) in vxge_hw_vpath_dynamic_rti_ci_set() argument 245 u64 val64 = ring->tim_rti_cfg1_saved; in vxge_hw_vpath_dynamic_rti_ci_set() 248 ring->tim_rti_cfg1_saved = val64; in vxge_hw_vpath_dynamic_rti_ci_set() 249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); in vxge_hw_vpath_dynamic_rti_ci_set() 268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) in vxge_hw_vpath_dynamic_rti_rtimer_set() argument 270 u64 val64 = ring->tim_rti_cfg3_saved; in vxge_hw_vpath_dynamic_rti_rtimer_set() 271 u64 timer = (ring->rtimer * 1000) / 272; in vxge_hw_vpath_dynamic_rti_rtimer_set() 278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); in vxge_hw_vpath_dynamic_rti_rtimer_set() 1137 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring, in vxge_hw_ring_rxd_reserve() argument 1143 channel = &ring->channel; in vxge_hw_ring_rxd_reserve() [all …]
|
D | vxge-main.c | 133 struct vxge_ring *ring; in VXGE_COMPLETE_ALL_RX() local 137 ring = &vdev->vpaths[i].ring; in VXGE_COMPLETE_ALL_RX() 138 vxge_hw_vpath_poll_rx(ring->handle); in VXGE_COMPLETE_ALL_RX() 194 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) in vxge_rx_alloc() argument 200 dev = ring->ndev; in vxge_rx_alloc() 202 ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc() 212 ring->stats.skb_alloc_fail++; in vxge_rx_alloc() 217 "%s: %s:%d Skb : 0x%p", ring->ndev->name, in vxge_rx_alloc() 226 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc() 234 static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) in vxge_rx_map() argument [all …]
|
D | vxge-config.c | 1363 if (device_config->vp_config[i].ring.enable == in vxge_hw_device_initialize() 1365 nblocks += device_config->vp_config[i].ring.ring_blocks; in vxge_hw_device_initialize() 2022 static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) in __vxge_hw_ring_first_block_address_get() argument 2026 dma_object = ring->mempool->memblocks_dma_arr; in __vxge_hw_ring_first_block_address_get() 2064 struct __vxge_hw_ring *ring, u32 from, in __vxge_hw_ring_rxdblock_link() argument 2100 struct __vxge_hw_ring *ring = in __vxge_hw_ring_mempool_item_alloc() local 2104 for (i = 0; i < ring->rxds_per_block; i++) { in __vxge_hw_ring_mempool_item_alloc() 2109 u32 reserve_index = ring->channel.reserve_ptr - in __vxge_hw_ring_mempool_item_alloc() 2110 (index * ring->rxds_per_block + i + 1); in __vxge_hw_ring_mempool_item_alloc() 2113 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + in __vxge_hw_ring_mempool_item_alloc() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf.h | 100 #define check_for_tx_hang(ring) \ argument 101 test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) 102 #define set_check_for_tx_hang(ring) \ argument 103 set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) 104 #define clear_check_for_tx_hang(ring) \ argument 105 clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) 176 struct ixgbevf_ring *ring; /* pointer to linked list of rings */ member 185 for (pos = (head).ring; pos != NULL; pos = pos->next) 236 q_vector->tx.ring->stats.yields++; in ixgbevf_qv_lock_napi() 273 q_vector->rx.ring->stats.yields++; in ixgbevf_qv_lock_poll() [all …]
|
D | ixgbevf_main.c | 218 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) in ixgbevf_get_tx_completed() argument 220 return ring->stats.packets; in ixgbevf_get_tx_completed() 223 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) in ixgbevf_get_tx_pending() argument 225 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending() 228 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); in ixgbevf_get_tx_pending() 229 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); in ixgbevf_get_tx_pending() 233 tail - head : (tail + ring->count - head); in ixgbevf_get_tx_pending() 466 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, in ixgbevf_rx_checksum() argument 473 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbevf_rx_checksum() 479 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum() [all …]
|
/linux-4.1.27/drivers/net/ethernet/pasemi/ |
D | pasemi_mac.c | 316 struct pasemi_mac_csring *ring; in pasemi_mac_setup_csring() local 321 ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), in pasemi_mac_setup_csring() 324 if (!ring) { in pasemi_mac_setup_csring() 329 chno = ring->chan.chno; in pasemi_mac_setup_csring() 331 ring->size = CS_RING_SIZE; in pasemi_mac_setup_csring() 332 ring->next_to_fill = 0; in pasemi_mac_setup_csring() 335 if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) in pasemi_mac_setup_csring() 339 PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); in pasemi_mac_setup_csring() 340 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); in pasemi_mac_setup_csring() 345 ring->events[0] = pasemi_dma_alloc_flag(); in pasemi_mac_setup_csring() [all …]
|
D | pasemi_mac.h | 117 #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ argument 118 & ((ring)->size - 1)) 119 #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) argument
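pasemi's RING_USED/RING_AVAIL macros are the masked flavor of the free-running-index idiom: because the ring size is a power of two, `(fill - clean) & (size - 1)` yields the in-flight count directly. A stand-alone check (the mask trick assumes the producer never runs a full ring ahead of the consumer):

#include <assert.h>

#define RING_SIZE 128u               /* must be a power of two */

static unsigned int ring_used(unsigned int fill, unsigned int clean)
{
        return (fill - clean) & (RING_SIZE - 1);
}

int main(void)
{
        assert(ring_used(5, 5) == 0);
        assert(ring_used(130, 120) == 10);               /* free-running indices */
        assert(RING_SIZE - ring_used(130, 120) == 118);  /* RING_AVAIL analogue */
        return 0;
}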
|
/linux-4.1.27/arch/powerpc/boot/dts/fsl/ |
D | qoriq-sec6.0-0.dtsi | 42 compatible = "fsl,sec-v6.0-job-ring", 43 "fsl,sec-v5.2-job-ring", 44 "fsl,sec-v5.0-job-ring", 45 "fsl,sec-v4.4-job-ring", 46 "fsl,sec-v4.0-job-ring"; 51 compatible = "fsl,sec-v6.0-job-ring", 52 "fsl,sec-v5.2-job-ring", 53 "fsl,sec-v5.0-job-ring", 54 "fsl,sec-v4.4-job-ring", 55 "fsl,sec-v4.0-job-ring";
|
D | qoriq-sec5.3-0.dtsi | 45 compatible = "fsl,sec-v5.3-job-ring", 46 "fsl,sec-v5.0-job-ring", 47 "fsl,sec-v4.0-job-ring"; 53 compatible = "fsl,sec-v5.3-job-ring", 54 "fsl,sec-v5.0-job-ring", 55 "fsl,sec-v4.0-job-ring"; 61 compatible = "fsl,sec-v5.3-job-ring", 62 "fsl,sec-v5.0-job-ring", 63 "fsl,sec-v4.0-job-ring"; 69 compatible = "fsl,sec-v5.3-job-ring", [all …]
|
D | qoriq-sec5.2-0.dtsi | 45 compatible = "fsl,sec-v5.2-job-ring", 46 "fsl,sec-v5.0-job-ring", 47 "fsl,sec-v4.0-job-ring"; 53 compatible = "fsl,sec-v5.2-job-ring", 54 "fsl,sec-v5.0-job-ring", 55 "fsl,sec-v4.0-job-ring"; 61 compatible = "fsl,sec-v5.2-job-ring", 62 "fsl,sec-v5.0-job-ring", 63 "fsl,sec-v4.0-job-ring"; 69 compatible = "fsl,sec-v5.2-job-ring", [all …]
|
D | pq3-sec4.4-0.dtsi | 45 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring"; 51 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring"; 57 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring"; 63 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
|
D | qoriq-raid1.0-0.dtsi | 50 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring"; 57 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring"; 72 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring"; 79 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring";
|
D | qoriq-sec4.2-0.dtsi | 45 compatible = "fsl,sec-v4.2-job-ring", 46 "fsl,sec-v4.0-job-ring"; 52 compatible = "fsl,sec-v4.2-job-ring", 53 "fsl,sec-v4.0-job-ring"; 59 compatible = "fsl,sec-v4.2-job-ring", 60 "fsl,sec-v4.0-job-ring"; 66 compatible = "fsl,sec-v4.2-job-ring", 67 "fsl,sec-v4.0-job-ring";
|
D | qoriq-sec5.0-0.dtsi | 45 compatible = "fsl,sec-v5.0-job-ring", 46 "fsl,sec-v4.0-job-ring"; 52 compatible = "fsl,sec-v5.0-job-ring", 53 "fsl,sec-v4.0-job-ring"; 59 compatible = "fsl,sec-v5.0-job-ring", 60 "fsl,sec-v4.0-job-ring"; 66 compatible = "fsl,sec-v5.0-job-ring", 67 "fsl,sec-v4.0-job-ring";
|
D | p1023si-post.dtsi | 187 compatible = "fsl,sec-v4.2-job-ring", 188 "fsl,sec-v4.0-job-ring"; 194 compatible = "fsl,sec-v4.2-job-ring", 195 "fsl,sec-v4.0-job-ring"; 201 compatible = "fsl,sec-v4.2-job-ring", 202 "fsl,sec-v4.0-job-ring"; 208 compatible = "fsl,sec-v4.2-job-ring", 209 "fsl,sec-v4.0-job-ring";
|
/linux-4.1.27/drivers/net/ethernet/freescale/ |
D | ucc_geth_ethtool.c | 215 struct ethtool_ringparam *ring) in uec_get_ringparam() argument 221 ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam() 222 ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam() 223 ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam() 224 ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam() 226 ring->rx_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 227 ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 228 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 229 ring->tx_pending = ug_info->bdRingLenTx[queue]; in uec_get_ringparam() 234 struct ethtool_ringparam *ring) in uec_set_ringparam() argument [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/ |
D | raideng.txt | 48 There must be a sub-node for each job ring present in RAID Engine 51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value 52 This identifies the job ring. Should contain either 53 "fsl,raideng-v1.0-hp-ring" or "fsl,raideng-v1.0-lp-ring" 54 depending upon whether the ring has high or low priority 55 - reg: offset and length of the register set for the job ring 56 - interrupts: interrupt mapping for the job ring IRQ 75 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/ |
D | pci.c | 532 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; in _rtl_pci_tx_chk_waitq() local 540 (ring->entries - skb_queue_len(&ring->queue) > in _rtl_pci_tx_chk_waitq() 567 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; in _rtl_pci_tx_isr() local 569 while (skb_queue_len(&ring->queue)) { in _rtl_pci_tx_isr() 577 entry = (u8 *)(&ring->buffer_desc[ring->idx]); in _rtl_pci_tx_isr() 579 entry = (u8 *)(&ring->desc[ring->idx]); in _rtl_pci_tx_isr() 588 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) in _rtl_pci_tx_isr() 590 ring->idx = (ring->idx + 1) % ring->entries; in _rtl_pci_tx_isr() 592 skb = __skb_dequeue(&ring->queue); in _rtl_pci_tx_isr() 605 ring->idx, in _rtl_pci_tx_isr() [all …]
|
/linux-4.1.27/drivers/gpu/drm/i810/ |
D | i810_dma.c | 215 if (dev_priv->ring.virtual_start) in i810_dma_cleanup() 216 drm_legacy_ioremapfree(&dev_priv->ring.map, dev); in i810_dma_cleanup() 239 drm_i810_ring_buffer_t *ring = &(dev_priv->ring); in i810_wait_ring() local 245 while (ring->space < n) { in i810_wait_ring() 246 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; in i810_wait_ring() 247 ring->space = ring->head - (ring->tail + 8); in i810_wait_ring() 248 if (ring->space < 0) in i810_wait_ring() 249 ring->space += ring->Size; in i810_wait_ring() 251 if (ring->head != last_head) { in i810_wait_ring() 253 last_head = ring->head; in i810_wait_ring() [all …]
|
D | i810_drv.h | 86 drm_i810_ring_buffer_t ring; member 150 if (dev_priv->ring.space < n*4) \ 152 dev_priv->ring.space -= n*4; \ 153 outring = dev_priv->ring.tail; \ 154 ringmask = dev_priv->ring.tail_mask; \ 155 virt = dev_priv->ring.virtual_start; \ 161 dev_priv->ring.tail = outring; \
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | cassini.c | 294 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument 297 if (ring == 0) { in cas_disable_irq() 304 switch (ring) { in cas_disable_irq() 316 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq() 321 REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq() 335 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument 337 if (ring == 0) { /* all but TX_DONE */ in cas_enable_irq() 343 switch (ring) { in cas_enable_irq() 355 REG_PLUS_INTRN_MASK(ring)); in cas_enable_irq() 1388 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/crypto/ |
D | fsl-sec6.txt | 84 Definition: Must include "fsl,sec-v6.0-job-ring". 103 compatible = "fsl,sec-v6.0-job-ring"; 123 compatible = "fsl,sec-v6.0-job-ring", 124 "fsl,sec-v5.2-job-ring", 125 "fsl,sec-v5.0-job-ring", 126 "fsl,sec-v4.4-job-ring", 127 "fsl,sec-v4.0-job-ring"; 132 compatible = "fsl,sec-v6.0-job-ring", 133 "fsl,sec-v5.2-job-ring", 134 "fsl,sec-v5.0-job-ring", [all …]
|
/linux-4.1.27/net/dccp/ccids/lib/ |
D | loss_interval.c | 28 return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL; in tfrc_lh_peek() 35 return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length; in tfrc_lh_get_interval() 43 if (lh->ring[LIH_INDEX(lh->counter)] == NULL) in tfrc_lh_demand_next() 44 lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab, in tfrc_lh_demand_next() 46 return lh->ring[LIH_INDEX(lh->counter)]; in tfrc_lh_demand_next() 55 if (lh->ring[LIH_INDEX(lh->counter)] != NULL) { in tfrc_lh_cleanup() 57 lh->ring[LIH_INDEX(lh->counter)]); in tfrc_lh_cleanup() 58 lh->ring[LIH_INDEX(lh->counter)] = NULL; in tfrc_lh_cleanup()
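The loss-interval history above is a counter-indexed ring: entries are stored at LIH_INDEX(counter) and the most recent one is read back at LIH_INDEX(counter - 1). A minimal sketch of that access pattern, assuming a power-of-two window so the index reduces to a mask:

#include <assert.h>

#define HIST_SIZE 8                              /* power of two, illustrative */
#define HIST_INDEX(ctr) ((ctr) & (HIST_SIZE - 1))

int main(void)
{
        int ring[HIST_SIZE] = {0};
        unsigned int counter = 0;
        int v;

        for (v = 1; v <= 20; v++)                /* keep only the last 8 values */
                ring[HIST_INDEX(counter++)] = v;

        assert(ring[HIST_INDEX(counter - 1)] == 20);  /* most recent entry */
        assert(ring[HIST_INDEX(counter - 4)] == 17);  /* three steps back */
        return 0;
}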
|
D | packet_history.c | 152 struct tfrc_rx_hist_entry *tmp = h->ring[idx_a]; in tfrc_rx_hist_swap() 154 h->ring[idx_a] = h->ring[idx_b]; in tfrc_rx_hist_swap() 155 h->ring[idx_b] = tmp; in tfrc_rx_hist_swap() 354 h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC); in tfrc_rx_hist_alloc() 355 if (h->ring[i] == NULL) in tfrc_rx_hist_alloc() 364 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_alloc() 365 h->ring[i] = NULL; in tfrc_rx_hist_alloc() 375 if (h->ring[i] != NULL) { in tfrc_rx_hist_purge() 376 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_purge() 377 h->ring[i] = NULL; in tfrc_rx_hist_purge() [all …]
|
D | packet_history.h | 95 struct tfrc_rx_hist_entry *ring[TFRC_NDUPACK + 1]; member 115 return h->ring[tfrc_rx_hist_index(h, h->loss_count)]; in tfrc_rx_hist_last_rcv() 124 return h->ring[tfrc_rx_hist_index(h, n)]; in tfrc_rx_hist_entry() 133 return h->ring[h->loss_start]; in tfrc_rx_hist_loss_prev()
|
/linux-4.1.27/drivers/net/ethernet/qualcomm/ |
D | qca_debug.c | 263 qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) in qcaspi_get_ringparam() argument 267 ring->rx_max_pending = 4; in qcaspi_get_ringparam() 268 ring->tx_max_pending = TX_RING_MAX_LEN; in qcaspi_get_ringparam() 269 ring->rx_pending = 4; in qcaspi_get_ringparam() 270 ring->tx_pending = qca->txr.count; in qcaspi_get_ringparam() 274 qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) in qcaspi_set_ringparam() argument 278 if ((ring->rx_pending) || in qcaspi_set_ringparam() 279 (ring->rx_mini_pending) || in qcaspi_set_ringparam() 280 (ring->rx_jumbo_pending)) in qcaspi_set_ringparam() 286 qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); in qcaspi_set_ringparam()
|
/linux-4.1.27/net/netlink/ |
D | af_netlink.c | 382 struct netlink_ring *ring; in __netlink_set_ring() local 385 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; in __netlink_set_ring() 389 ring->frame_max = req->nm_frame_nr - 1; in __netlink_set_ring() 390 ring->head = 0; in __netlink_set_ring() 391 ring->frame_size = req->nm_frame_size; in __netlink_set_ring() 392 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; in __netlink_set_ring() 394 swap(ring->pg_vec_len, req->nm_block_nr); in __netlink_set_ring() 395 swap(ring->pg_vec_order, order); in __netlink_set_ring() 396 swap(ring->pg_vec, pg_vec); in __netlink_set_ring() 411 struct netlink_ring *ring; in netlink_set_ring() local [all …]
|
D | diag.c | 12 static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type, in sk_diag_put_ring() argument 17 ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT; in sk_diag_put_ring() 18 ndr.ndr_block_nr = ring->pg_vec_len; in sk_diag_put_ring() 19 ndr.ndr_frame_size = ring->frame_size; in sk_diag_put_ring() 20 ndr.ndr_frame_nr = ring->frame_max + 1; in sk_diag_put_ring()
|
/linux-4.1.27/arch/tile/include/gxio/ |
D | mpipe.h | 559 unsigned int ring, 579 unsigned int ring); 588 *context, unsigned int ring); 628 gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring) in gxio_mpipe_notif_group_add_ring() argument 630 bits->ring_mask[ring / 64] |= (1ull << (ring % 64)); in gxio_mpipe_notif_group_add_ring() 762 unsigned int ring, 777 int ring, int bucket, unsigned int count) in gxio_mpipe_credit() argument 791 offset.ring = ring; in gxio_mpipe_credit() 793 offset.ring_enable = (ring >= 0); in gxio_mpipe_credit() 1137 unsigned int ring; member [all …]
|
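gxio_mpipe_notif_group_add_ring() above is the standard multi-word bitmap idiom: word = bit / 64, mask = 1 << (bit % 64). A minimal standalone version, with a matching test helper added for illustration (NUM_RINGS is an assumed size):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 256

    struct ring_bits { uint64_t ring_mask[NUM_RINGS / 64]; };

    static void bits_add_ring(struct ring_bits *bits, int ring)
    {
        bits->ring_mask[ring / 64] |= (1ull << (ring % 64));
    }

    static int bits_has_ring(const struct ring_bits *bits, int ring)
    {
        return (bits->ring_mask[ring / 64] >> (ring % 64)) & 1;
    }

    int main(void)
    {
        struct ring_bits bits = { { 0 } };
        bits_add_ring(&bits, 70);                 /* lands in word 1, bit 6 */
        printf("ring 70: %d, ring 71: %d\n",
               bits_has_ring(&bits, 70), bits_has_ring(&bits, 71));
        return 0;
    }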
D | iorpc_mpipe.h | 75 unsigned int ring); 80 unsigned int ring); 83 unsigned int ring); 105 unsigned int ring, unsigned int channel);
|
/linux-4.1.27/include/uapi/linux/ |
D | virtio_ring.h | 77 __virtio16 ring[]; member 91 struct vring_used_elem ring[]; member 137 #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) 138 #define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num]) 146 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) in vring_init()
|
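A worked example of the layout arithmetic behind vring_init() above: the used ring starts at the first align-aligned address past the avail ring, and avail->ring[num] is followed by one extra __virtio16 (used_event, per the event-index feature), which is the `+ sizeof(__virtio16)` in the excerpt. Sizes below assume the legacy layout: 16-byte descriptors, 2-byte avail entries.

    #include <stdio.h>

    static unsigned long align_up(unsigned long x, unsigned long align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        unsigned num = 256, align = 4096;

        unsigned long desc_end  = num * 16UL;          /* vring_desc[num] */
        /* flags, idx, ring[num], used_event: all __virtio16 */
        unsigned long avail_end = desc_end + 2 * (3 + num);
        unsigned long used_off  = align_up(avail_end, align);

        printf("avail at %lu, used at %lu\n", desc_end, used_off);
        return 0;
    }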
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
D | htt_tx.c | 209 struct htt_rx_ring_setup_ring *ring; in ath10k_htt_send_rx_ring_cfg_ll() local 224 + (sizeof(*ring) * num_rx_ring); in ath10k_htt_send_rx_ring_cfg_ll() 232 ring = &cmd->rx_setup.rings[0]; in ath10k_htt_send_rx_ring_cfg_ll() 258 ring->fw_idx_shadow_reg_paddr = in ath10k_htt_send_rx_ring_cfg_ll() 260 ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); in ath10k_htt_send_rx_ring_cfg_ll() 261 ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); in ath10k_htt_send_rx_ring_cfg_ll() 262 ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); in ath10k_htt_send_rx_ring_cfg_ll() 263 ring->flags = __cpu_to_le16(flags); in ath10k_htt_send_rx_ring_cfg_ll() 264 ring->fw_idx_init_val = __cpu_to_le16(fw_idx); in ath10k_htt_send_rx_ring_cfg_ll() 268 ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); in ath10k_htt_send_rx_ring_cfg_ll() [all …]
|
/linux-4.1.27/arch/tile/gxio/ |
D | iorpc_mpipe.c | 102 unsigned int ring; member 107 unsigned int ring) in gxio_mpipe_init_notif_ring_aux() argument 122 params->ring = ring; in gxio_mpipe_init_notif_ring_aux() 133 unsigned int ring; member 139 unsigned int ring) in gxio_mpipe_request_notif_ring_interrupt() argument 148 params->ring = ring; in gxio_mpipe_request_notif_ring_interrupt() 158 unsigned int ring; member 162 unsigned int ring) in gxio_mpipe_enable_notif_ring_interrupt() argument 167 params->ring = ring; in gxio_mpipe_enable_notif_ring_interrupt() 287 unsigned int ring; member [all …]
|
D | mpipe.c | 185 unsigned int ring, in gxio_mpipe_init_notif_ring() argument 190 mem_flags, ring); in gxio_mpipe_init_notif_ring() 197 unsigned int ring, in gxio_mpipe_init_notif_group_and_buckets() argument 215 gxio_mpipe_notif_group_add_ring(&bits, ring + i); in gxio_mpipe_init_notif_group_and_buckets() 222 bucket_info.notifring = ring + (i % num_rings); in gxio_mpipe_init_notif_group_and_buckets() 236 unsigned int ring, unsigned int channel, in gxio_mpipe_init_edma_ring() argument 243 ring, channel); in gxio_mpipe_init_edma_ring() 391 unsigned int ring, in gxio_mpipe_iqueue_init() argument 399 iqueue->ring = ring; in gxio_mpipe_iqueue_init() 411 return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size, in gxio_mpipe_iqueue_init() [all …]
|
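gxio_mpipe_init_notif_group_and_buckets() above spreads buckets over a contiguous block of rings round-robin: bucket i is served by ring (first_ring + i % num_rings). A toy illustration of that mapping; the first_ring/num_rings values are made up.

    #include <stdio.h>

    int main(void)
    {
        unsigned first_ring = 4, num_rings = 3, num_buckets = 8;

        for (unsigned i = 0; i < num_buckets; i++)
            printf("bucket %u -> ring %u\n", i, first_ring + (i % num_rings));
        return 0;
    }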
/linux-4.1.27/drivers/staging/iio/accel/ |
D | sca3000_ring.c | 236 struct iio_hw_buffer *ring; in sca3000_rb_allocate() local 238 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in sca3000_rb_allocate() 239 if (!ring) in sca3000_rb_allocate() 242 ring->private = indio_dev; in sca3000_rb_allocate() 243 buf = &ring->buf; in sca3000_rb_allocate() 341 void sca3000_ring_int_process(u8 val, struct iio_buffer *ring) in sca3000_ring_int_process() argument 345 ring->stufftoread = true; in sca3000_ring_int_process() 346 wake_up_interruptible(&ring->pollq); in sca3000_ring_int_process()
|
/linux-4.1.27/drivers/gpu/drm/via/ |
D | via_dma.c | 161 if (dev_priv->ring.virtual_start) { in via_dma_cleanup() 164 drm_legacy_ioremapfree(&dev_priv->ring.map, dev); in via_dma_cleanup() 165 dev_priv->ring.virtual_start = NULL; in via_dma_cleanup() 182 if (dev_priv->ring.virtual_start != NULL) { in via_initialize() 197 dev_priv->ring.map.offset = dev->agp->base + init->offset; in via_initialize() 198 dev_priv->ring.map.size = init->size; in via_initialize() 199 dev_priv->ring.map.type = 0; in via_initialize() 200 dev_priv->ring.map.flags = 0; in via_initialize() 201 dev_priv->ring.map.mtrr = 0; in via_initialize() 203 drm_legacy_ioremap(&dev_priv->ring.map, dev); in via_initialize() [all …]
|
/linux-4.1.27/fs/ |
D | aio.c | 429 struct aio_ring *ring; in aio_setup_ring() local 506 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring() 507 ring->nr = nr_events; /* user copy */ in aio_setup_ring() 508 ring->id = ~0U; in aio_setup_ring() 509 ring->head = ring->tail = 0; in aio_setup_ring() 510 ring->magic = AIO_RING_MAGIC; in aio_setup_ring() 511 ring->compat_features = AIO_RING_COMPAT_FEATURES; in aio_setup_ring() 512 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; in aio_setup_ring() 513 ring->header_length = sizeof(struct aio_ring); in aio_setup_ring() 514 kunmap_atomic(ring); in aio_setup_ring() [all …]
|
/linux-4.1.27/drivers/staging/rtl8192e/rtl8192e/ |
D | rtl_core.c | 445 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; in rtl8192_get_nic_desc_num() local 450 if ((prio == MGNT_QUEUE) && (skb_queue_len(&ring->queue) > 10)) in rtl8192_get_nic_desc_num() 453 prio, ring->idx, skb_queue_len(&ring->queue)); in rtl8192_get_nic_desc_num() 454 return skb_queue_len(&ring->queue); in rtl8192_get_nic_desc_num() 460 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; in rtl8192_check_nic_enough_desc() local 462 if (ring->entries - skb_queue_len(&ring->queue) >= 2) in rtl8192_check_nic_enough_desc() 721 struct rtl8192_tx_ring *ring = NULL; in rtl8192_prepare_beacon() local 724 ring = &priv->tx_ring[BEACON_QUEUE]; in rtl8192_prepare_beacon() 725 pskb = __skb_dequeue(&ring->queue); in rtl8192_prepare_beacon() 740 pdesc = &ring->desc[0]; in rtl8192_prepare_beacon() [all …]
|
/linux-4.1.27/arch/tile/include/hv/ |
D | drv_trio_intf.h | 130 #define HV_TRIO_PUSH_DMA_OFFSET(ring) \ argument 133 ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)) 136 #define HV_TRIO_PULL_DMA_OFFSET(ring) \ argument 139 ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT))
|
/linux-4.1.27/include/xen/interface/io/ |
D | ring.h | 28 (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ 29 sizeof(((struct _s##_sring *)0)->ring[0]))) 35 (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) 84 union __name##_sring_entry ring[1]; /* variable-length */ \ 182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 185 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
|
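The RING_GET_REQUEST/RESPONSE macros above mask a free-running index into a power-of-two ring: indices only grow, `idx & (size - 1)` picks the slot, and `prod - cons` counts outstanding entries correctly even when the index type wraps. A standalone sketch of that discipline (names and RING_SIZE are illustrative):

    #include <stdio.h>

    #define RING_SIZE 8   /* must be a power of two for the mask to work */

    struct ring {
        int slot[RING_SIZE];
        unsigned int prod, cons;   /* free-running, never masked in place */
    };

    static int ring_full(const struct ring *r)  { return r->prod - r->cons == RING_SIZE; }
    static int ring_empty(const struct ring *r) { return r->prod == r->cons; }

    /* caller checks ring_full()/ring_empty() first */
    static void ring_put(struct ring *r, int v) { r->slot[r->prod++ & (RING_SIZE - 1)] = v; }
    static int  ring_get(struct ring *r)        { return r->slot[r->cons++ & (RING_SIZE - 1)]; }

    int main(void)
    {
        struct ring r = { { 0 }, 0, 0 };
        for (int i = 0; i < 5; i++)
            ring_put(&r, i);
        while (!ring_empty(&r))
            printf("%d ", ring_get(&r));
        printf("(full=%d)\n", ring_full(&r));
        return 0;
    }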
D | console.h | 14 #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) argument
|
/linux-4.1.27/net/packet/ |
D | diag.c | 71 static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type, in pdiag_put_ring() argument 76 if (!ring->pg_vec || ((ver > TPACKET_V2) && in pdiag_put_ring() 80 pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT; in pdiag_put_ring() 81 pdr.pdr_block_nr = ring->pg_vec_len; in pdiag_put_ring() 82 pdr.pdr_frame_size = ring->frame_size; in pdiag_put_ring() 83 pdr.pdr_frame_nr = ring->frame_max + 1; in pdiag_put_ring() 86 pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov; in pdiag_put_ring() 87 pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv; in pdiag_put_ring() 88 pdr.pdr_features = ring->prb_bdqc.feature_req_word; in pdiag_put_ring()
|
/linux-4.1.27/drivers/crypto/caam/ |
D | ctrl.c | 282 int ring, ret = 0; in caam_remove() local 289 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { in caam_remove() 290 if (ctrlpriv->jrpdev[ring]) in caam_remove() 291 of_device_unregister(ctrlpriv->jrpdev[ring]); in caam_remove() 387 int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; in caam_probe() local 503 ring = 0; in caam_probe() 508 ctrlpriv->jrpdev[ring] = in caam_probe() 510 if (!ctrlpriv->jrpdev[ring]) { in caam_probe() 512 ring); in caam_probe() 515 ctrlpriv->jr[ring] = (struct caam_job_ring __force *) in caam_probe() [all …]
|
D | Kconfig | 7 This module creates job ring devices, and configures h/w 21 and Assurance Module (CAAM). This module adds a job ring operation 34 range 2-9 (ring size 4-512). 64 equal or greater than the job ring size will force timeouts. 85 stack) to the SEC4 via job ring. 97 scatterlist crypto API to the SEC4 via job ring.
|
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb.h | 225 struct igb_ring *ring; /* pointer to linked list of rings */ member 288 struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; member 315 static inline int igb_desc_unused(struct igb_ring *ring) in igb_desc_unused() argument 317 if (ring->next_to_clean > ring->next_to_use) in igb_desc_unused() 318 return ring->next_to_clean - ring->next_to_use - 1; in igb_desc_unused() 320 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in igb_desc_unused()
|
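igb_desc_unused() above is the standard "free slots in a descriptor ring" computation: next_to_use chases next_to_clean, and one slot is kept empty so full and empty states are distinguishable. A minimal sketch with both branches exercised:

    #include <stdio.h>

    struct ring { unsigned count, next_to_use, next_to_clean; };

    static int desc_unused(const struct ring *ring)
    {
        if (ring->next_to_clean > ring->next_to_use)
            return ring->next_to_clean - ring->next_to_use - 1;
        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
    }

    int main(void)
    {
        struct ring a = { .count = 256, .next_to_use = 10,  .next_to_clean = 50 };
        struct ring b = { .count = 256, .next_to_use = 200, .next_to_clean = 100 };

        printf("%d %d\n", desc_unused(&a), desc_unused(&b)); /* 39 and 155 */
        return 0;
    }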
D | igb_main.c | 801 if (q_vector->rx.ring) in igb_assign_vector() 802 rx_queue = q_vector->rx.ring->reg_idx; in igb_assign_vector() 803 if (q_vector->tx.ring) in igb_assign_vector() 804 tx_queue = q_vector->tx.ring->reg_idx; in igb_assign_vector() 962 if (q_vector->rx.ring && q_vector->tx.ring) in igb_request_msix() 964 q_vector->rx.ring->queue_index); in igb_request_msix() 965 else if (q_vector->tx.ring) in igb_request_msix() 967 q_vector->tx.ring->queue_index); in igb_request_msix() 968 else if (q_vector->rx.ring) in igb_request_msix() 970 q_vector->rx.ring->queue_index); in igb_request_msix() [all …]
|
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8180/ |
D | dev.c | 345 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; in rtl8180_handle_tx() local 347 while (skb_queue_len(&ring->queue)) { in rtl8180_handle_tx() 348 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; in rtl8180_handle_tx() 356 ring->idx = (ring->idx + 1) % ring->entries; in rtl8180_handle_tx() 357 skb = __skb_dequeue(&ring->queue); in rtl8180_handle_tx() 371 if (ring->entries - skb_queue_len(&ring->queue) == 2) in rtl8180_handle_tx() 461 struct rtl8180_tx_ring *ring; in rtl8180_tx() local 474 ring = &priv->tx_ring[prio]; in rtl8180_tx() 544 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; in rtl8180_tx() 545 entry = &ring->desc[idx]; in rtl8180_tx() [all …]
|
/linux-4.1.27/drivers/net/ethernet/oki-semi/pch_gbe/ |
D | pch_gbe_ethtool.c | 267 struct ethtool_ringparam *ring) in pch_gbe_get_ringparam() argument 273 ring->rx_max_pending = PCH_GBE_MAX_RXD; in pch_gbe_get_ringparam() 274 ring->tx_max_pending = PCH_GBE_MAX_TXD; in pch_gbe_get_ringparam() 275 ring->rx_pending = rxdr->count; in pch_gbe_get_ringparam() 276 ring->tx_pending = txdr->count; in pch_gbe_get_ringparam() 288 struct ethtool_ringparam *ring) in pch_gbe_set_ringparam() argument 296 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in pch_gbe_set_ringparam() 320 clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD); in pch_gbe_set_ringparam() 324 clamp_val(ring->tx_pending, PCH_GBE_MIN_TXD, PCH_GBE_MAX_TXD); in pch_gbe_set_ringparam()
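(Note the TX clamp on line 324 uses the TXD limits, matching the TX bounds reported by pch_gbe_get_ringparam() above; the set/get pair must agree or ethtool would advertise limits the driver then ignores.)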
|
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/ |
D | ethtool.c | 204 struct ethtool_ringparam *ring) in igbvf_get_ringparam() argument 210 ring->rx_max_pending = IGBVF_MAX_RXD; in igbvf_get_ringparam() 211 ring->tx_max_pending = IGBVF_MAX_TXD; in igbvf_get_ringparam() 212 ring->rx_pending = rx_ring->count; in igbvf_get_ringparam() 213 ring->tx_pending = tx_ring->count; in igbvf_get_ringparam() 217 struct ethtool_ringparam *ring) in igbvf_set_ringparam() argument 224 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in igbvf_set_ringparam() 227 new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); in igbvf_set_ringparam() 231 new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); in igbvf_set_ringparam()
|
/linux-4.1.27/drivers/crypto/qat/qat_dh895xcc/ |
D | adf_hw_arbiter.c | 127 void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) in adf_update_ring_arb_enable() argument 129 WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, in adf_update_ring_arb_enable() 130 ring->bank->bank_number, in adf_update_ring_arb_enable() 131 ring->bank->ring_mask & 0xFF); in adf_update_ring_arb_enable()
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192ee/ |
D | trx.c | 929 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc() local 930 u16 max_tx_desc = ring->entries; in rtl92ee_set_desc() 933 ring->cur_tx_wp = 0; in rtl92ee_set_desc() 934 ring->cur_tx_rp = 0; in rtl92ee_set_desc() 939 ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc); in rtl92ee_set_desc() 942 ring->cur_tx_wp = 0; in rtl92ee_set_desc() 945 if (ring->avl_desc > 1) { in rtl92ee_set_desc() 946 ring->avl_desc--; in rtl92ee_set_desc() 950 ring->cur_tx_wp); in rtl92ee_set_desc() 956 if (ring->avl_desc < (max_tx_desc - 15)) { in rtl92ee_set_desc() [all …]
|
/linux-4.1.27/drivers/scsi/ |
D | xen-scsifront.c | 110 struct vscsiif_front_ring ring; member 177 struct vscsiif_front_ring *ring = &(info->ring); in scsifront_pre_req() local 185 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); in scsifront_pre_req() 187 ring->req_prod_pvt++; in scsifront_pre_req() 196 struct vscsiif_front_ring *ring = &(info->ring); in scsifront_do_request() local 199 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); in scsifront_do_request() 303 rp = info->ring.sring->rsp_prod; in scsifront_ring_drain() 305 for (i = info->ring.rsp_cons; i != rp; i++) { in scsifront_ring_drain() 306 ring_rsp = RING_GET_RESPONSE(&info->ring, i); in scsifront_ring_drain() 310 info->ring.rsp_cons = i; in scsifront_ring_drain() [all …]
|
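scsifront_pre_req()/scsifront_do_request() above follow the Xen shared-ring producer discipline: requests are built at a private producer index (req_prod_pvt), then published to the shared index in one step so the backend never sees a half-written slot. A userspace sketch of that split; the real code uses the RING_* macros, a write barrier, and an event-channel notify.

    #include <stdio.h>

    #define RING_SIZE 16

    struct req { int id; };

    struct front_ring {
        struct req ring[RING_SIZE];
        unsigned int req_prod_pvt;            /* private: where we build */
        volatile unsigned int shared_prod;    /* published: what backend sees */
    };

    static struct req *get_request(struct front_ring *r)
    {
        return &r->ring[r->req_prod_pvt++ & (RING_SIZE - 1)];
    }

    static void push_requests(struct front_ring *r)
    {
        /* real code: wmb() before publishing, then notify the backend */
        r->shared_prod = r->req_prod_pvt;
    }

    int main(void)
    {
        struct front_ring r = { .req_prod_pvt = 0, .shared_prod = 0 };
        get_request(&r)->id = 1;
        get_request(&r)->id = 2;
        push_requests(&r);
        printf("published %u requests\n", r.shared_prod);
        return 0;
    }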
/linux-4.1.27/arch/tile/include/arch/ |
D | mpipe.h | 41 uint_reg_t ring : 8; member 67 uint_reg_t ring : 8; 246 uint_reg_t ring : 6; member 266 uint_reg_t ring : 6;
|
/linux-4.1.27/drivers/tty/serial/ |
D | atmel_serial.c | 601 struct circ_buf *ring = &atmel_port->rx_ring; in atmel_buffer_rx_char() local 604 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) in atmel_buffer_rx_char() 608 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; in atmel_buffer_rx_char() 615 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); in atmel_buffer_rx_char() 929 struct circ_buf *ring = &atmel_port->rx_ring; in atmel_rx_from_dma() local 962 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; in atmel_rx_from_dma() 963 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); in atmel_rx_from_dma() 976 if (ring->head < ring->tail) { in atmel_rx_from_dma() 977 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; in atmel_rx_from_dma() 979 tty_insert_flip_string(tport, ring->buf + ring->tail, count); in atmel_rx_from_dma() [all …]
|
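atmel_buffer_rx_char() above uses the kernel's circ_buf convention: head is the producer index, tail the consumer, and CIRC_SPACE keeps one slot free so full never looks like empty. Standalone sketch with the linux/circ_buf.h formula inlined; RINGSIZE must be a power of two for the wrap mask.

    #include <stdio.h>

    #define RINGSIZE 16
    #define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

    struct rx_ring {
        char buf[RINGSIZE];
        unsigned int head, tail;
    };

    /* 0 if the character was buffered, -1 if the ring was full */
    static int buffer_rx_char(struct rx_ring *ring, char c)
    {
        if (!CIRC_SPACE(ring->head, ring->tail, RINGSIZE))
            return -1;                     /* caller would flag an overrun */
        ring->buf[ring->head] = c;
        ring->head = (ring->head + 1) & (RINGSIZE - 1);
        return 0;
    }

    int main(void)
    {
        struct rx_ring ring = { { 0 }, 0, 0 };
        for (char c = 'a'; c <= 'z'; c++)
            if (buffer_rx_char(&ring, c) < 0) {
                printf("full after '%c'\n", c - 1);   /* 15 chars fit */
                break;
            }
        return 0;
    }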
/linux-4.1.27/drivers/net/ethernet/neterion/ |
D | s2io.c | 706 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem() local 715 ring->block_count = rx_cfg->num_rxd / in init_shared_mem() 717 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; in init_shared_mem() 726 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem() local 728 ring->rx_curr_get_info.block_index = 0; in init_shared_mem() 729 ring->rx_curr_get_info.offset = 0; in init_shared_mem() 730 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem() 731 ring->rx_curr_put_info.block_index = 0; in init_shared_mem() 732 ring->rx_curr_put_info.offset = 0; in init_shared_mem() 733 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem() [all …]
|
/linux-4.1.27/net/hsr/ |
D | Kconfig | 11 and it must be connected as a node in a ring network together with 15 directions on the ring (over both slave ports), giving a redundant, 16 instant fail-over network. Each HSR node in the ring acts like a
|
/linux-4.1.27/Documentation/trace/ |
D | ring-buffer-design.txt | 17 tail - where new writes happen in the ring buffer. 19 head - where new reads happen in the ring buffer. 21 producer - the task that writes into the ring buffer (same as writer) 29 reader_page - A page outside the ring buffer used solely (for the most part) 52 The ring buffer can be used in either an overwrite mode or in 91 The ring buffer is made up of a list of pages held together by a linked list. 94 part of the ring buffer. 107 become part of the ring buffer and the head_page will be removed. 173 if what is in the ring buffer is less than what is held in a buffer page. 192 When the writer leaves the page, it simply goes into the ring buffer [all …]
|
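The two policies described in ring-buffer-design.txt, reduced to a toy: in producer/consumer mode a full ring drops the new write, while in overwrite mode it advances the read position and drops the oldest entry instead. The real buffer is a linked list of pages with a swappable reader_page; this sketch models only the policy, with made-up names and sizes.

    #include <stdio.h>

    #define SIZE 4

    struct rb {
        int slot[SIZE];
        unsigned int head, tail;    /* head = read side, tail = write side */
        int overwrite;
    };

    static void rb_write(struct rb *b, int v)
    {
        if (b->tail - b->head == SIZE) {
            if (!b->overwrite)
                return;             /* producer/consumer: discard new event */
            b->head++;              /* overwrite: discard oldest event */
        }
        b->slot[b->tail++ % SIZE] = v;
    }

    int main(void)
    {
        struct rb pc = { .overwrite = 0 }, ow = { .overwrite = 1 };
        for (int i = 1; i <= 6; i++) {
            rb_write(&pc, i);
            rb_write(&ow, i);
        }
        /* pc keeps 1..4 (oldest), ow keeps 3..6 (newest) */
        for (unsigned i = pc.head; i != pc.tail; i++)
            printf("%d ", pc.slot[i % SIZE]);
        printf("| ");
        for (unsigned i = ow.head; i != ow.tail; i++)
            printf("%d ", ow.slot[i % SIZE]);
        printf("\n");
        return 0;
    }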