This source file includes following definitions.
- tegra_ivc_invalidate
- tegra_ivc_flush
- tegra_ivc_empty
- tegra_ivc_full
- tegra_ivc_available
- tegra_ivc_advance_tx
- tegra_ivc_advance_rx
- tegra_ivc_check_read
- tegra_ivc_check_write
- tegra_ivc_frame_virt
- tegra_ivc_frame_phys
- tegra_ivc_invalidate_frame
- tegra_ivc_flush_frame
- tegra_ivc_read_get_next_frame
- tegra_ivc_read_advance
- tegra_ivc_write_get_next_frame
- tegra_ivc_write_advance
- tegra_ivc_reset
- tegra_ivc_notified
- tegra_ivc_align
- tegra_ivc_total_queue_size
- tegra_ivc_check_params
- tegra_ivc_init
- tegra_ivc_cleanup
1
2
3
4
5
6 #include <soc/tegra/ivc.h>
7
8 #define TEGRA_IVC_ALIGN 64
9
10
11
12
13
14
/*
 * IVC channel reset protocol states.
 *
 * Each end publishes its own state through tx.state in its transmit header;
 * the transitions below are the ones implemented in tegra_ivc_notified().
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that has cleared the counters in our
	 * rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint
	 * is allowed to clear the counters it owns asynchronously with
	 * respect to the current endpoint. Therefore, this endpoint is no
	 * longer allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the counters and transition to the ack state.
	 * If the remote endpoint observes us in the ack state, it can return
	 * to the established state once it has cleared its counters.
	 */
	TEGRA_IVC_STATE_ACK
};
45
46
47
48
49
50
51
52
/*
 * Shared-memory channel header.
 *
 * The header is divided into two cache-aligned parts: the tx part is only
 * written by the transmitting end, the rx part only by the receiving end.
 * Padding each part to TEGRA_IVC_ALIGN keeps the two ends on separate cache
 * lines, which is required for correctness on non-cache-coherent transports
 * (see tegra_ivc_flush()/tegra_ivc_invalidate(), which sync exactly
 * TEGRA_IVC_ALIGN bytes).
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;	/* number of frames written so far */
			u32 state;	/* enum tegra_ivc_state of this end */
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;	/* number of frames consumed so far */
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
70
71 static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
72 {
73 if (!ivc->peer)
74 return;
75
76 dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
77 DMA_FROM_DEVICE);
78 }
79
80 static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
81 {
82 if (!ivc->peer)
83 return;
84
85 dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
86 DMA_TO_DEVICE);
87 }
88
89 static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
90 struct tegra_ivc_header *header)
91 {
92
93
94
95
96
97 u32 tx = READ_ONCE(header->tx.count);
98 u32 rx = READ_ONCE(header->rx.count);
99
100
101
102
103
104
105
106
107
108
109
110 if (tx - rx > ivc->num_frames)
111 return true;
112
113 return tx == rx;
114 }
115
116 static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
117 struct tegra_ivc_header *header)
118 {
119 u32 tx = READ_ONCE(header->tx.count);
120 u32 rx = READ_ONCE(header->rx.count);
121
122
123
124
125
126 return tx - rx >= ivc->num_frames;
127 }
128
/*
 * Return the number of frames written by the transmitter but not yet
 * consumed by the receiver (modular, wrap-around arithmetic).
 */
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * No over-full sanity check here: callers in this file only compare
	 * the result against small thresholds (1 or num_frames - 1) to
	 * detect empty->non-empty / full->non-full transitions.
	 */
	return tx - rx;
}
143
144 static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
145 {
146 WRITE_ONCE(ivc->tx.channel->tx.count,
147 READ_ONCE(ivc->tx.channel->tx.count) + 1);
148
149 if (ivc->tx.position == ivc->num_frames - 1)
150 ivc->tx.position = 0;
151 else
152 ivc->tx.position++;
153 }
154
155 static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
156 {
157 WRITE_ONCE(ivc->rx.channel->rx.count,
158 READ_ONCE(ivc->rx.channel->rx.count) + 1);
159
160 if (ivc->rx.position == ivc->num_frames - 1)
161 ivc->rx.position = 0;
162 else
163 ivc->rx.position++;
164 }
165
/*
 * Check whether a frame is available to read.
 *
 * Returns 0 when data is available, -ECONNRESET when the channel is not
 * established, -ENOSPC when the receive queue is empty.
 */
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	/* re-fetch the peer's tx.count from shared memory and recheck */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}
198
/*
 * Check whether a frame slot is available to write.
 *
 * Returns 0 when space is available, -ECONNRESET when the channel is not
 * established, -ENOSPC when the transmit queue is full.
 */
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/* fast path: cached counters already show a free slot */
	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	/* re-fetch the peer's rx.count from shared memory and recheck */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}
216
217 static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
218 struct tegra_ivc_header *header,
219 unsigned int frame)
220 {
221 if (WARN_ON(frame >= ivc->num_frames))
222 return ERR_PTR(-EINVAL);
223
224 return (void *)(header + 1) + ivc->frame_size * frame;
225 }
226
227 static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
228 dma_addr_t phys,
229 unsigned int frame)
230 {
231 unsigned long offset;
232
233 offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
234
235 return phys + offset;
236 }
237
238 static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
239 dma_addr_t phys,
240 unsigned int frame,
241 unsigned int offset,
242 size_t size)
243 {
244 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
245 return;
246
247 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
248
249 dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
250 }
251
252 static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
253 dma_addr_t phys,
254 unsigned int frame,
255 unsigned int offset,
256 size_t size)
257 {
258 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
259 return;
260
261 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
262
263 dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
264 }
265
266
/*
 * Peek at the next received frame without consuming it.
 *
 * Returns a pointer into the frame, or an ERR_PTR on failure (-EINVAL for a
 * NULL channel, or the error from tegra_ivc_check_read()). The frame remains
 * owned by the channel until tegra_ivc_read_advance() is called.
 */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before the data read.
	 */
	smp_rmb();

	/* make the peer's writes to the frame visible to the CPU */
	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
290
/*
 * Release the frame previously returned by tegra_ivc_read_get_next_frame()
 * back to the transmitter, and notify the peer on a full->non-full
 * transition.
 */
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or re-synchronization here: the caller is
	 * expected to have already observed the channel non-empty. This
	 * check is just to catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	/* make the updated rx.count visible to the peer */
	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
329
330
331 void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
332 {
333 int err;
334
335 err = tegra_ivc_check_write(ivc);
336 if (err < 0)
337 return ERR_PTR(err);
338
339 return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
340 }
341 EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
342
343
/*
 * Hand the frame previously obtained via tegra_ivc_write_get_next_frame()
 * over to the receiver, and notify the peer on an empty->non-empty
 * transition.
 */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	/* catch programming errors; caller should have checked for space */
	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	/* make the frame contents visible to the peer device */
	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before the update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
385
/*
 * Request a channel reset: enter the SYNC state, publish it to the peer and
 * notify. The handshake then proceeds in tegra_ivc_notified() on both ends.
 */
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
/*
 * Drive the channel reset handshake in response to a peer notification.
 *
 * Transitions implemented below:
 *   - peer in SYNC            -> clear counters, go to ACK
 *   - we in SYNC, peer in ACK -> clear counters, go to ESTABLISHED
 *   - we in ACK (peer left SYNC) -> go to ESTABLISHED
 *
 * Returns 0 once the local end is ESTABLISHED, -EAGAIN while the handshake
 * is still in progress.
 */
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset the counters. The remote end is in the SYNC state
		 * and won't make progress until we change our state, so the
		 * counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before the new state
		 * can be observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so
		 * it is now safe for the remote end to start using them.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset the counters. The remote end is in the ACK state and
		 * won't make progress until we change our state, so the
		 * counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before the new state
		 * can be observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting
		 * for the remote end to catch up with our current state.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
542
543 size_t tegra_ivc_align(size_t size)
544 {
545 return ALIGN(size, TEGRA_IVC_ALIGN);
546 }
547 EXPORT_SYMBOL(tegra_ivc_align);
548
549 unsigned tegra_ivc_total_queue_size(unsigned queue_size)
550 {
551 if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
552 pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
553 __func__, queue_size, TEGRA_IVC_ALIGN);
554 return 0;
555 }
556
557 return queue_size + sizeof(struct tegra_ivc_header);
558 }
559 EXPORT_SYMBOL(tegra_ivc_total_queue_size);
560
/*
 * Validate channel geometry before setup: header field alignment, total
 * size overflow, frame-size and base-address alignment, and non-overlap of
 * the two queue regions. Returns 0 or -EINVAL.
 */
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	/* counters must sit on their own cache lines (see header layout) */
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	/* the total frame area must fit in 32 bits */
	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The queue bases must be aligned at least well enough for the
	 * header counters to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	/* the two queues must not overlap, whichever order they lie in */
	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}
611
/*
 * Initialize an IVC channel.
 *
 * @peer may be NULL, in which case @rx_phys/@tx_phys are used as the bus
 * addresses directly and no DMA mapping or cache maintenance is performed;
 * otherwise both queues are mapped bidirectionally for @peer.
 *
 * Returns 0 on success, -EINVAL/-E2BIG for bad parameters, -ENOMEM on a
 * mapping failure. The channel still needs tegra_ivc_reset() before use.
 */
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int value.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			/* unwind the rx mapping before failing */
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has
	 * been reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
674
675 void tegra_ivc_cleanup(struct tegra_ivc *ivc)
676 {
677 if (ivc->peer) {
678 size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
679 ivc->frame_size);
680
681 dma_unmap_single(ivc->peer, ivc->rx.phys, size,
682 DMA_BIDIRECTIONAL);
683 dma_unmap_single(ivc->peer, ivc->tx.phys, size,
684 DMA_BIDIRECTIONAL);
685 }
686 }
687 EXPORT_SYMBOL(tegra_ivc_cleanup);