Lines Matching refs:ring
22 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") argument
25 static int ring_interrupt_index(struct tb_ring *ring) in ring_interrupt_index() argument
27 int bit = ring->hop; in ring_interrupt_index()
28 if (!ring->is_tx) in ring_interrupt_index()
29 bit += ring->nhi->hop_count; in ring_interrupt_index()
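The lines above pack TX and RX rings into a single interrupt bit space: a TX ring uses bit hop, an RX ring uses bit hop_count + hop. A minimal user-space sketch of that mapping (the hop_count value below is an assumption; the driver reads it from the NHI hardware):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors ring_interrupt_index() with plain ints, for illustration only. */
    static int interrupt_index(bool is_tx, int hop, int hop_count)
    {
            int bit = hop;
            if (!is_tx)
                    bit += hop_count;   /* RX bits start after all TX bits */
            return bit;
    }

    int main(void)
    {
            int hop_count = 12;         /* assumed; the driver reads this from hardware */

            printf("TX hop 0 -> bit %d, RX hop 0 -> bit %d\n",
                   interrupt_index(true, 0, hop_count),
                   interrupt_index(false, 0, hop_count));
            return 0;
    }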
38 static void ring_interrupt_active(struct tb_ring *ring, bool active) in ring_interrupt_active() argument
40 int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32; in ring_interrupt_active()
41 int bit = ring_interrupt_index(ring) & 31; in ring_interrupt_active()
44 old = ioread32(ring->nhi->iobase + reg); in ring_interrupt_active()
50 dev_info(&ring->nhi->pdev->dev, in ring_interrupt_active()
55 dev_WARN(&ring->nhi->pdev->dev, in ring_interrupt_active()
57 RING_TYPE(ring), ring->hop, in ring_interrupt_active()
59 iowrite32(new, ring->nhi->iobase + reg); in ring_interrupt_active()
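ring_interrupt_active() then performs a read-modify-write on the 32-bit interrupt mask register selected by that index: index / 32 picks the register word, index & 31 picks the bit within it. A standalone sketch of the mask arithmetic (the MMIO access and register addressing are left out, since they depend on the NHI register map):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same set/clear logic as ring_interrupt_active(), without the MMIO access. */
    static uint32_t update_mask(uint32_t old, int index, bool active)
    {
            uint32_t mask = 1u << (index & 31);   /* bit within the 32-bit register */

            return active ? (old | mask) : (old & ~mask);
    }

    int main(void)
    {
            int index = 33;   /* e.g. an RX ring once hop_count has been added */

            printf("register word %d, value 0x%08x\n",
                   index / 32, update_mask(0, index, true));
            return 0;
    }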
81 static void __iomem *ring_desc_base(struct tb_ring *ring) in ring_desc_base() argument
83 void __iomem *io = ring->nhi->iobase; in ring_desc_base()
84 io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; in ring_desc_base()
85 io += ring->hop * 16; in ring_desc_base()
89 static void __iomem *ring_options_base(struct tb_ring *ring) in ring_options_base() argument
91 void __iomem *io = ring->nhi->iobase; in ring_options_base()
92 io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; in ring_options_base()
93 io += ring->hop * 32; in ring_options_base()
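The two base helpers encode the per-ring register strides: each ring owns a 16-byte descriptor register block and a 32-byte options block, both indexed by hop. A sketch of the offset arithmetic (the base constants below are placeholders, not the REG_*_BASE values from nhi_regs.h):

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder bases; the real REG_*_BASE constants live in nhi_regs.h. */
    #define TX_RING_BASE    0x00000
    #define RX_RING_BASE    0x08000
    #define TX_OPTIONS_BASE 0x19800
    #define RX_OPTIONS_BASE 0x29800

    static unsigned desc_off(bool is_tx, int hop)
    {
            return (is_tx ? TX_RING_BASE : RX_RING_BASE) + hop * 16;
    }

    static unsigned options_off(bool is_tx, int hop)
    {
            return (is_tx ? TX_OPTIONS_BASE : RX_OPTIONS_BASE) + hop * 32;
    }

    int main(void)
    {
            printf("RX hop 3: descriptors at 0x%x, options at 0x%x\n",
                   desc_off(false, 3), options_off(false, 3));
            return 0;
    }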
97 static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) in ring_iowrite16desc() argument
99 iowrite16(value, ring_desc_base(ring) + offset); in ring_iowrite16desc()
102 static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) in ring_iowrite32desc() argument
104 iowrite32(value, ring_desc_base(ring) + offset); in ring_iowrite32desc()
107 static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) in ring_iowrite64desc() argument
109 iowrite32(value, ring_desc_base(ring) + offset); in ring_iowrite64desc()
110 iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); in ring_iowrite64desc()
113 static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) in ring_iowrite32options() argument
115 iowrite32(value, ring_options_base(ring) + offset); in ring_iowrite32options()
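ring_iowrite64desc() publishes a 64-bit value (the descriptor ring's DMA address) as two 32-bit MMIO writes: the low word at offset, the high word at offset + 4. A self-contained sketch of that split:

    #include <stdint.h>
    #include <stdio.h>

    /* The same truncation/shift used by ring_iowrite64desc(). */
    static void split64(uint64_t value, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)value;           /* iowrite32(value, base + offset) */
            *hi = (uint32_t)(value >> 32);   /* iowrite32(value >> 32, base + offset + 4) */
    }

    int main(void)
    {
            uint32_t lo, hi;

            split64(0x123456789abcdef0ULL, &lo, &hi);
            printf("lo=0x%08x hi=0x%08x\n", lo, hi);
            return 0;
    }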
118 static bool ring_full(struct tb_ring *ring) in ring_full() argument
120 return ((ring->head + 1) % ring->size) == ring->tail; in ring_full()
123 static bool ring_empty(struct tb_ring *ring) in ring_empty() argument
125 return ring->head == ring->tail; in ring_empty()
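ring_full() and ring_empty() are the standard one-slot-free circular buffer tests: head == tail means empty, so the ring counts as full one entry early and can carry at most size - 1 frames at a time. A quick sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* One-slot-free ring buffer test, as in ring_full()/ring_empty(). */
    struct idx { int head, tail, size; };

    static bool full(struct idx *r)  { return (r->head + 1) % r->size == r->tail; }
    static bool empty(struct idx *r) { return r->head == r->tail; }

    int main(void)
    {
            struct idx r = { .head = 0, .tail = 0, .size = 4 };

            while (!full(&r))
                    r.head = (r.head + 1) % r.size;   /* produce */
            printf("held %d of %d slots, empty=%d\n",
                   (r.head - r.tail + r.size) % r.size, r.size, empty(&r));
            return 0;
    }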
133 static void ring_write_descriptors(struct tb_ring *ring) in ring_write_descriptors() argument
137 list_for_each_entry_safe(frame, n, &ring->queue, list) { in ring_write_descriptors()
138 if (ring_full(ring)) in ring_write_descriptors()
140 list_move_tail(&frame->list, &ring->in_flight); in ring_write_descriptors()
141 descriptor = &ring->descriptors[ring->head]; in ring_write_descriptors()
145 if (ring->is_tx) { in ring_write_descriptors()
150 ring->head = (ring->head + 1) % ring->size; in ring_write_descriptors()
151 ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); in ring_write_descriptors()
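ring_write_descriptors() is the producer half: frames move from the software queue to the in-flight list, fill the descriptor at head, head advances, and the new head is published to the hardware (offset 10 for TX rings, 8 for RX, per the iowrite16 above). A simplified, list-free sketch of that loop; the descriptor layout here is a stand-in for the driver's real one:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in descriptor; the real one also carries flags, eof/sof, etc. */
    struct desc { uint64_t phys; uint32_t len; };

    static int post_frames(struct desc *ring, int size, int *head, int tail,
                           const uint64_t *bufs, int nbufs)
    {
            int posted = 0;

            while (posted < nbufs && ((*head + 1) % size) != tail) {
                    ring[*head].phys = bufs[posted];   /* DMA address of the frame */
                    ring[*head].len = 4096;            /* assumed frame size */
                    *head = (*head + 1) % size;
                    posted++;
            }
            return posted;   /* the driver now writes *head to the ring's head register */
    }

    int main(void)
    {
            struct desc ring[4] = { { 0 } };
            uint64_t bufs[5] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };
            int head = 0;

            printf("posted %d of 5 frames, head now %d\n",
                   post_frames(ring, 4, &head, 0, bufs, 5), head);
            return 0;
    }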
166 struct tb_ring *ring = container_of(work, typeof(*ring), work); in ring_work() local
170 mutex_lock(&ring->lock); in ring_work()
172 if (!ring->running) { in ring_work()
174 list_splice_tail_init(&ring->in_flight, &done); in ring_work()
175 list_splice_tail_init(&ring->queue, &done); in ring_work()
180 while (!ring_empty(ring)) { in ring_work()
181 if (!(ring->descriptors[ring->tail].flags in ring_work()
184 frame = list_first_entry(&ring->in_flight, typeof(*frame), in ring_work()
187 if (!ring->is_tx) { in ring_work()
188 frame->size = ring->descriptors[ring->tail].length; in ring_work()
189 frame->eof = ring->descriptors[ring->tail].eof; in ring_work()
190 frame->sof = ring->descriptors[ring->tail].sof; in ring_work()
191 frame->flags = ring->descriptors[ring->tail].flags; in ring_work()
193 dev_WARN(&ring->nhi->pdev->dev, in ring_work()
195 RING_TYPE(ring), ring->hop, in ring_work()
205 dev_WARN(&ring->nhi->pdev->dev, in ring_work()
207 RING_TYPE(ring), ring->hop, in ring_work()
210 ring->tail = (ring->tail + 1) % ring->size; in ring_work()
212 ring_write_descriptors(ring); in ring_work()
215 mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */ in ring_work()
223 frame->callback(ring, frame, canceled); in ring_work()
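ring_work() is the consumer half: it walks from tail while the hardware has marked descriptors complete, copies RX metadata back into the frame, advances tail, refills the ring, and only after dropping the lock runs the frame callbacks (so a callback may enqueue again). A sketch of the completion scan; the completion flag value is an assumption, not the driver's ring_desc_flags definition:

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_COMPLETED 0x2   /* assumed flag value, set by the NHI on completion */

    struct desc { uint32_t flags; uint32_t length; };

    static int reap(struct desc *ring, int size, int *tail, int head)
    {
            int done = 0;

            while (*tail != head && (ring[*tail].flags & DESC_COMPLETED)) {
                    /* in the driver, frame->size/eof/sof/flags are copied here for RX */
                    *tail = (*tail + 1) % size;
                    done++;
            }
            return done;
    }

    int main(void)
    {
            struct desc ring[4] = { { DESC_COMPLETED, 256 }, { DESC_COMPLETED, 128 } };
            int tail = 0;

            printf("completed %d descriptors, tail now %d\n",
                   reap(ring, 4, &tail, 3), tail);
            return 0;
    }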
227 int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) in __ring_enqueue() argument
230 mutex_lock(&ring->lock); in __ring_enqueue()
231 if (ring->running) { in __ring_enqueue()
232 list_add_tail(&frame->list, &ring->queue); in __ring_enqueue()
233 ring_write_descriptors(ring); in __ring_enqueue()
237 mutex_unlock(&ring->lock); in __ring_enqueue()
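__ring_enqueue() only queues the frame and kicks the descriptor writer while the ring is running; it fails once the ring has been stopped. A hedged, kernel-style usage sketch follows; the ring_frame fields (buffer_phy, size, callback) and the ring_tx() wrapper are assumed from the driver's nhi.h and may differ:

    #include "nhi.h"   /* assumed: declares struct tb_ring, struct ring_frame, ring_tx() */

    /* Completion callback; ring_work() invokes it after dropping the ring lock,
     * so it is safe to enqueue another frame from here. */
    static void my_frame_done(struct tb_ring *ring, struct ring_frame *frame,
                              bool canceled)
    {
            if (canceled)
                    return;   /* ring was stopped before this frame completed */
            /* ... reclaim or reuse the DMA buffer behind frame->buffer_phy ... */
    }

    static int send_one(struct tb_ring *tx_ring, struct ring_frame *frame,
                        dma_addr_t buf, size_t len)
    {
            frame->buffer_phy = buf;          /* DMA-mapped payload */
            frame->size = len;
            frame->callback = my_frame_done;
            return ring_tx(tx_ring, frame);   /* fails if the ring is not running */
    }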
244 struct tb_ring *ring = NULL; in ring_alloc() local
260 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in ring_alloc()
261 if (!ring) in ring_alloc()
264 mutex_init(&ring->lock); in ring_alloc()
265 INIT_LIST_HEAD(&ring->queue); in ring_alloc()
266 INIT_LIST_HEAD(&ring->in_flight); in ring_alloc()
267 INIT_WORK(&ring->work, ring_work); in ring_alloc()
269 ring->nhi = nhi; in ring_alloc()
270 ring->hop = hop; in ring_alloc()
271 ring->is_tx = transmit; in ring_alloc()
272 ring->size = size; in ring_alloc()
273 ring->head = 0; in ring_alloc()
274 ring->tail = 0; in ring_alloc()
275 ring->running = false; in ring_alloc()
276 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, in ring_alloc()
277 size * sizeof(*ring->descriptors), in ring_alloc()
278 &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); in ring_alloc()
279 if (!ring->descriptors) in ring_alloc()
283 nhi->tx_rings[hop] = ring; in ring_alloc()
285 nhi->rx_rings[hop] = ring; in ring_alloc()
287 return ring; in ring_alloc()
290 if (ring) in ring_alloc()
291 mutex_destroy(&ring->lock); in ring_alloc()
292 kfree(ring); in ring_alloc()
312 void ring_start(struct tb_ring *ring) in ring_start() argument
314 mutex_lock(&ring->nhi->lock); in ring_start()
315 mutex_lock(&ring->lock); in ring_start()
316 if (ring->running) { in ring_start()
317 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); in ring_start()
320 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n", in ring_start()
321 RING_TYPE(ring), ring->hop); in ring_start()
323 ring_iowrite64desc(ring, ring->descriptors_dma, 0); in ring_start()
324 if (ring->is_tx) { in ring_start()
325 ring_iowrite32desc(ring, ring->size, 12); in ring_start()
326 ring_iowrite32options(ring, 0, 4); /* time related? */ in ring_start()
327 ring_iowrite32options(ring, in ring_start()
330 ring_iowrite32desc(ring, in ring_start()
331 (TB_FRAME_SIZE << 16) | ring->size, 12); in ring_start()
332 ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */ in ring_start()
333 ring_iowrite32options(ring, in ring_start()
336 ring_interrupt_active(ring, true); in ring_start()
337 ring->running = true; in ring_start()
339 mutex_unlock(&ring->lock); in ring_start()
340 mutex_unlock(&ring->nhi->lock); in ring_start()
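For RX rings, ring_start() packs two values into the 32-bit register at offset 12: the maximum frame size in the upper 16 bits and the descriptor count in the lower 16. A small sketch of that packing (the TB_FRAME_SIZE value used here is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_SIZE 0x100   /* assumed value of TB_FRAME_SIZE */

    int main(void)
    {
            uint32_t ring_size = 16;   /* number of descriptors, example value */
            uint32_t reg = (FRAME_SIZE << 16) | ring_size;   /* written to offset 12 */

            printf("size register = 0x%08x (frame %u, descriptors %u)\n",
                   reg, reg >> 16, reg & 0xffff);
            return 0;
    }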
356 void ring_stop(struct tb_ring *ring) in ring_stop() argument
358 mutex_lock(&ring->nhi->lock); in ring_stop()
359 mutex_lock(&ring->lock); in ring_stop()
360 dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n", in ring_stop()
361 RING_TYPE(ring), ring->hop); in ring_stop()
362 if (!ring->running) { in ring_stop()
363 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", in ring_stop()
364 RING_TYPE(ring), ring->hop); in ring_stop()
367 ring_interrupt_active(ring, false); in ring_stop()
369 ring_iowrite32options(ring, 0, 0); in ring_stop()
370 ring_iowrite64desc(ring, 0, 0); in ring_stop()
371 ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); in ring_stop()
372 ring_iowrite32desc(ring, 0, 12); in ring_stop()
373 ring->head = 0; in ring_stop()
374 ring->tail = 0; in ring_stop()
375 ring->running = false; in ring_stop()
378 mutex_unlock(&ring->lock); in ring_stop()
379 mutex_unlock(&ring->nhi->lock); in ring_stop()
384 schedule_work(&ring->work); in ring_stop()
385 flush_work(&ring->work); in ring_stop()
398 void ring_free(struct tb_ring *ring) in ring_free() argument
400 mutex_lock(&ring->nhi->lock); in ring_free()
405 if (ring->is_tx) in ring_free()
406 ring->nhi->tx_rings[ring->hop] = NULL; in ring_free()
408 ring->nhi->rx_rings[ring->hop] = NULL; in ring_free()
410 if (ring->running) { in ring_free()
411 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", in ring_free()
412 RING_TYPE(ring), ring->hop); in ring_free()
415 dma_free_coherent(&ring->nhi->pdev->dev, in ring_free()
416 ring->size * sizeof(*ring->descriptors), in ring_free()
417 ring->descriptors, ring->descriptors_dma); in ring_free()
419 ring->descriptors = NULL; in ring_free()
420 ring->descriptors_dma = 0; in ring_free()
423 dev_info(&ring->nhi->pdev->dev, in ring_free()
425 RING_TYPE(ring), in ring_free()
426 ring->hop); in ring_free()
428 mutex_unlock(&ring->nhi->lock); in ring_free()
434 flush_work(&ring->work); in ring_free()
435 mutex_destroy(&ring->lock); in ring_free()
436 kfree(ring); in ring_free()
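Taken together, the lifecycle visible here is: allocate a ring for a hop, start it, enqueue frames, stop it (which cancels anything still queued via the work item), and free it. A hedged kernel-style sketch of that order; the ring_alloc_tx() wrapper name and the hop/size choice are assumptions about the driver's public interface:

    #include "nhi.h"   /* assumed: declares tb_nhi, tb_ring and the ring_* calls */

    static int demo_ring_lifecycle(struct tb_nhi *nhi)
    {
            struct tb_ring *tx = ring_alloc_tx(nhi, 0 /* hop */, 10 /* descriptors */);

            if (!tx)
                    return -ENOMEM;

            ring_start(tx);   /* programs the ring registers, enables its IRQ bit */
            /* ... enqueue frames; completions arrive via ring_work() callbacks ... */
            ring_stop(tx);    /* remaining frames get callback(..., canceled=true) */
            ring_free(tx);    /* flushes the work item, releases the descriptors */
            return 0;
    }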
446 struct tb_ring *ring; in nhi_interrupt_work() local
473 ring = nhi->tx_rings[hop]; in nhi_interrupt_work()
475 ring = nhi->rx_rings[hop]; in nhi_interrupt_work()
476 if (ring == NULL) { in nhi_interrupt_work()
484 schedule_work(&ring->work); in nhi_interrupt_work()
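The interrupt work simply looks up the ring that owns the signalled hop (TX or RX table), warns if none is registered, and defers everything else to that ring's work item. A small stand-in sketch of the dispatch step; the arrays and the "schedule" print stand in for nhi->tx_rings[]/rx_rings[] and schedule_work(&ring->work):

    #include <stdio.h>

    struct ring { int hop; };

    static void dispatch(struct ring **tx, struct ring **rx, int hop, int is_tx)
    {
            struct ring *ring = is_tx ? tx[hop] : rx[hop];

            if (!ring) {
                    fprintf(stderr, "interrupt for inactive ring %d\n", hop);
                    return;
            }
            printf("scheduling work for %s ring %d\n", is_tx ? "TX" : "RX", ring->hop);
    }

    int main(void)
    {
            struct ring r0 = { 0 };
            struct ring *tx[1] = { &r0 }, *rx[1] = { NULL };

            dispatch(tx, rx, 0, 1);   /* active TX ring */
            dispatch(tx, rx, 0, 0);   /* no RX ring registered for hop 0 */
            return 0;
    }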