This source file includes the following definitions:
- to_hsdma_dev
- to_hsdma_vchan
- to_hsdma_vdesc
- hsdma2dev
- mtk_dma_read
- mtk_dma_write
- mtk_dma_rmw
- mtk_dma_set
- mtk_dma_clr
- mtk_hsdma_vdesc_free
- mtk_hsdma_busy_wait
- mtk_hsdma_alloc_pchan
- mtk_hsdma_free_pchan
- mtk_hsdma_issue_pending_vdesc
- mtk_hsdma_issue_vchan_pending
- mtk_hsdma_free_rooms_in_ring
- mtk_hsdma_irq
- mtk_hsdma_find_active_desc
- mtk_hsdma_tx_status
- mtk_hsdma_issue_pending
- mtk_hsdma_prep_dma_memcpy
- mtk_hsdma_free_inactive_desc
- mtk_hsdma_free_active_desc
- mtk_hsdma_terminate_all
- mtk_hsdma_alloc_chan_resources
- mtk_hsdma_free_chan_resources
- mtk_hsdma_hw_init
- mtk_hsdma_hw_deinit
- mtk_hsdma_probe
- mtk_hsdma_remove
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for MediaTek High-Speed DMA Controller
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"

#define MTK_HSDMA_USEC_POLL		20
#define MTK_HSDMA_TIMEOUT_POLL		200000
#define MTK_HSDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
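
/* The default number of virtual channels */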
#define MTK_HSDMA_NR_VCHANS		3
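
/* Only one physical channel (PC) is supported */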
#define MTK_HSDMA_NR_MAX_PCHANS		1
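
/* The number of physical descriptors (PDs) per ring; must be a power of 2 */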
#define MTK_DMA_SIZE			64
#define MTK_HSDMA_NEXT_DESP_IDX(x, y)	(((x) + 1) & ((y) - 1))
#define MTK_HSDMA_LAST_DESP_IDX(x, y)	(((x) - 1) & ((y) - 1))
#define MTK_HSDMA_MAX_LEN		0x3f80
#define MTK_HSDMA_ALIGN_SIZE		4
#define MTK_HSDMA_PLEN_MASK		0x3fff
#define MTK_HSDMA_DESC_PLEN(x)		(((x) & MTK_HSDMA_PLEN_MASK) << 16)
#define MTK_HSDMA_DESC_PLEN_GET(x)	(((x) >> 16) & MTK_HSDMA_PLEN_MASK)
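
/* Registers for the TX/RX ring manipulation */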
#define MTK_HSDMA_TX_BASE		0x0
#define MTK_HSDMA_TX_CNT		0x4
#define MTK_HSDMA_TX_CPU		0x8
#define MTK_HSDMA_TX_DMA		0xc
#define MTK_HSDMA_RX_BASE		0x100
#define MTK_HSDMA_RX_CNT		0x104
#define MTK_HSDMA_RX_CPU		0x108
#define MTK_HSDMA_RX_DMA		0x10c
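
/* Registers for the global setup */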
#define MTK_HSDMA_GLO			0x204
#define MTK_HSDMA_GLO_MULTI_DMA		BIT(10)
#define MTK_HSDMA_TX_WB_DDONE		BIT(6)
#define MTK_HSDMA_BURST_64BYTES		(0x2 << 4)
#define MTK_HSDMA_GLO_RX_BUSY		BIT(3)
#define MTK_HSDMA_GLO_RX_DMA		BIT(2)
#define MTK_HSDMA_GLO_TX_BUSY		BIT(1)
#define MTK_HSDMA_GLO_TX_DMA		BIT(0)
#define MTK_HSDMA_GLO_DMA		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA)
#define MTK_HSDMA_GLO_BUSY		(MTK_HSDMA_GLO_RX_BUSY | \
					 MTK_HSDMA_GLO_TX_BUSY)
#define MTK_HSDMA_GLO_DEFAULT		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA |	\
					 MTK_HSDMA_TX_WB_DDONE | \
					 MTK_HSDMA_BURST_64BYTES | \
					 MTK_HSDMA_GLO_MULTI_DMA)
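
/* Registers for the reset */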
#define MTK_HSDMA_RESET			0x208
#define MTK_HSDMA_RST_TX		BIT(0)
#define MTK_HSDMA_RST_RX		BIT(16)
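
/* Registers for the delayed interrupt */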
#define MTK_HSDMA_DLYINT		0x20c
#define MTK_HSDMA_RXDLY_INT_EN		BIT(15)
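
/* Delayed-interrupt thresholds: maximum pending count and pending time */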
#define MTK_HSDMA_RXMAX_PINT(x)		(((x) & 0x7f) << 8)
#define MTK_HSDMA_RXMAX_PTIME(x)	((x) & 0x7f)
#define MTK_HSDMA_DLYINT_DEFAULT	(MTK_HSDMA_RXDLY_INT_EN | \
					 MTK_HSDMA_RXMAX_PINT(20) | \
					 MTK_HSDMA_RXMAX_PTIME(20))
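
/* Registers for the interrupt status and enable */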
#define MTK_HSDMA_INT_STATUS		0x220
#define MTK_HSDMA_INT_ENABLE		0x228
#define MTK_HSDMA_INT_RXDONE		BIT(16)
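
/**
 * enum mtk_hsdma_vdesc_flag - flags for a virtual descriptor (VD)
 * @MTK_HSDMA_VDESC_FINISHED: set on the callback record of the last PD
 *			      belonging to the VD, so its completion can be
 *			      detected in the ring
 */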
enum mtk_hsdma_vdesc_flag {
	MTK_HSDMA_VDESC_FINISHED = 0x01,
};

#define IS_MTK_HSDMA_VDESC_FINISHED(x)	((x) == MTK_HSDMA_VDESC_FINISHED)
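
/**
 * struct mtk_hsdma_pdesc - the physical descriptor (PD) the hardware consumes
 * @desc1:	the buffer address the PD points at
 * @desc2:	control bits and the payload length (MTK_HSDMA_DESC_PLEN)
 * @desc3:	unused by this driver
 * @desc4:	unused by this driver
 *
 * PDs must stay 4-byte aligned and are laid out in little-endian order.
 */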
struct mtk_hsdma_pdesc {
	__le32 desc1;
	__le32 desc2;
	__le32 desc3;
	__le32 desc4;
} __packed __aligned(4);
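
/**
 * struct mtk_hsdma_vdesc - the virtual descriptor (VD) a client submits
 * @vd:		virtual DMA descriptor bookkeeping
 * @len:	the length not yet turned into PDs
 * @residue:	the length the hardware has not yet completed
 * @dest:	the current destination address of the transfer
 * @src:	the current source address of the transfer
 */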
struct mtk_hsdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
};
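
/**
 * struct mtk_hsdma_cb - the callback record tying a ring slot back to its VD
 * @vd:		the VD the slot's PD belongs to
 * @flag:	MTK_HSDMA_VDESC_FINISHED on the VD's last PD, otherwise zero
 */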
struct mtk_hsdma_cb {
	struct virt_dma_desc *vd;
	enum mtk_hsdma_vdesc_flag flag;
};
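
/**
 * struct mtk_hsdma_ring - the TX/RX ring a physical channel drives
 * @txd:	the TX ring PDs
 * @rxd:	the RX ring PDs
 * @cb:		the callback records, one per ring slot
 * @tphys:	the physical base address of the TX ring
 * @rphys:	the physical base address of the RX ring
 * @cur_tptr:	the next TX slot the driver will fill
 * @cur_rptr:	the last RX slot the driver has reclaimed
 */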
struct mtk_hsdma_ring {
	struct mtk_hsdma_pdesc *txd;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_cb *cb;
	dma_addr_t tphys;
	dma_addr_t rphys;
	u16 cur_tptr;
	u16 cur_rptr;
};
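
/**
 * struct mtk_hsdma_pchan - the physical channel (PC) that owns the hardware
 * @ring:	the shared TX/RX ring
 * @sz_ring:	the size, in bytes, of the coherent ring allocation
 * @nr_free:	the number of ring slots still free for new PDs
 */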
struct mtk_hsdma_pchan {
	struct mtk_hsdma_ring ring;
	size_t sz_ring;
	atomic_t nr_free;
};
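
/**
 * struct mtk_hsdma_vchan - the virtual channel (VC) exposed to clients
 * @vc:			virtual DMA channel bookkeeping
 * @issue_completion:	completed once @desc_hw_processing drains
 * @issue_synchronize:	set when termination must wait for @issue_completion
 * @desc_hw_processing:	the VDs currently being processed by the hardware
 */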
struct mtk_hsdma_vchan {
	struct virt_dma_chan vc;
	struct completion issue_completion;
	bool issue_synchronize;
	struct list_head desc_hw_processing;
};
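
/**
 * struct mtk_hsdma_soc - the per-SoC descriptor bit layout
 * @ddone:	the DDONE bit the hardware sets on a completed PD
 * @ls0:	the LS0 (last segment) bit the driver sets on TX PDs
 */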
struct mtk_hsdma_soc {
	__le32 ddone;
	__le32 ls0;
};
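
/**
 * struct mtk_hsdma_device - the driver-wide state
 * @ddev:		the dmaengine device
 * @base:		the mapped register base
 * @clk:		the hsdma clock
 * @irq:		the interrupt line the controller uses
 * @dma_requests:	the number of VCs the controller exposes
 * @vc:			the virtual channels
 * @pc:			the single physical channel, shared by all VCs
 * @pc_refcnt:		the number of VCs currently using @pc
 * @lock:		protects the PD ring while descriptors are issued
 * @soc:		the per-SoC descriptor bit layout
 */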
struct mtk_hsdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	u32 irq;

	u32 dma_requests;
	struct mtk_hsdma_vchan *vc;
	struct mtk_hsdma_pchan *pc;
	refcount_t pc_refcnt;

	spinlock_t lock;

	const struct mtk_hsdma_soc *soc;
};

static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_hsdma_device, ddev);
}

static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
}

static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_hsdma_vdesc, vd);
}

static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
{
	return hsdma->ddev.dev;
}

static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
{
	return readl(hsdma->base + reg);
}

static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	writel(val, hsdma->base + reg);
}

static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(hsdma, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(hsdma, reg, val);
}

static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, val, 0);
}

static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
}

static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
{
	u32 status = 0;

	return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
				  !(status & MTK_HSDMA_GLO_BUSY),
				  MTK_HSDMA_USEC_POLL,
				  MTK_HSDMA_TIMEOUT_POLL);
}

static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	int err;

	memset(pc, 0, sizeof(*pc));
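
	/*
	 * Allocate one contiguous coherent block for both rings: slots
	 * [0, MTK_DMA_SIZE) back the TX ring and slots
	 * [MTK_DMA_SIZE, 2 * MTK_DMA_SIZE) back the RX ring.
	 */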
	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
				       &ring->tphys, GFP_NOWAIT);
	if (!ring->txd)
		return -ENOMEM;

	ring->rxd = &ring->txd[MTK_DMA_SIZE];
	ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->cur_tptr = 0;
	ring->cur_rptr = MTK_DMA_SIZE - 1;

	ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
	if (!ring->cb) {
		err = -ENOMEM;
		goto err_free_dma;
	}

	atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);
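
	/* Disable HSDMA and wait for the hardware to go idle */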
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	err = mtk_hsdma_busy_wait(hsdma);
	if (err)
		goto err_free_cb;
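
	/* Reset the TX and RX engines */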
	mtk_dma_set(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
	mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
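
	/* Tell the hardware where both rings live and how many PDs they hold */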
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);
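
	/* Re-enable HSDMA with the new ring in place */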
	mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
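
	/* Program the delayed-interrupt thresholds */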
	mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);
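
	/* Enable the RX-done interrupt */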
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	return 0;

err_free_cb:
	kfree(ring->cb);

err_free_dma:
	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
	return err;
}

static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
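
	/* Disable HSDMA and wait for the hardware to go idle */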
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	mtk_hsdma_busy_wait(hsdma);
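
	/* Mask the RX-done interrupt and clear the ring registers */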
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);

	kfree(ring->cb);

	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
}

static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
					 struct mtk_hsdma_pchan *pc,
					 struct mtk_hsdma_vdesc *hvd)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	struct mtk_hsdma_pdesc *txd, *rxd;
	u16 reserved, prev, tlen, num_sgs;
	unsigned long flags;

	spin_lock_irqsave(&hsdma->lock, flags);
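
	/*
	 * Reserve ring slots for this VD: it needs one PD per
	 * MTK_HSDMA_MAX_LEN chunk, but never more than are currently free.
	 */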
	num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
	reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));

	if (!reserved) {
		spin_unlock_irqrestore(&hsdma->lock, flags);
		return -ENOSPC;
	}

	atomic_sub(reserved, &pc->nr_free);

	while (reserved--) {
		tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
		       MTK_HSDMA_MAX_LEN : hvd->len;
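
		/*
		 * Set up a TX PD carrying the source and a matching RX PD
		 * carrying the destination: the hardware copies one chunk
		 * per TX/RX pair and marks the RX PD done on completion.
		 */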
		txd = &ring->txd[ring->cur_tptr];
		WRITE_ONCE(txd->desc1, hvd->src);
		WRITE_ONCE(txd->desc2,
			   hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));

		rxd = &ring->rxd[ring->cur_tptr];
		WRITE_ONCE(rxd->desc1, hvd->dest);
		WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));

		ring->cb[ring->cur_tptr].vd = &hvd->vd;

		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
							 MTK_DMA_SIZE);

		hvd->src += tlen;
		hvd->dest += tlen;
		hvd->len -= tlen;
	}
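
	/* Flag the last PD so the completion path knows the whole VD is done */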
	if (!hvd->len) {
		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
	}
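
	/* Ensure the PD writes are visible to the device before the doorbell */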
	wmb();
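
	/* Kick off the transfer by advancing the hardware-visible TX pointer */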
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);

	spin_unlock_irqrestore(&hsdma->lock, flags);

	return 0;
}

static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
					  struct mtk_hsdma_vchan *hvc)
{
	struct virt_dma_desc *vd, *vd2;
	int err;

	lockdep_assert_held(&hvc->vc.lock);

	list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
		struct mtk_hsdma_vdesc *hvd;

		hvd = to_hsdma_vdesc(vd);
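
		/* Map the VD onto the shared PC as far as ring space allows */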
		err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);
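
		/*
		 * The ring is full, or the VD was only partially set up:
		 * leave it on desc_issued so the completion path can
		 * reissue the remainder once slots free up.
		 */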
		if (err == -ENOSPC || hvd->len > 0)
			break;
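
		/*
		 * The VD is fully owned by the hardware now; move it to
		 * desc_hw_processing so tx_status() and terminate_all()
		 * can still find it.
		 */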
		list_move_tail(&vd->node, &hvc->desc_hw_processing);
	}
}

static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
{
	struct mtk_hsdma_vchan *hvc;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_vdesc *hvd;
	struct mtk_hsdma_pchan *pc;
	struct mtk_hsdma_cb *cb;
	int i = MTK_DMA_SIZE;
	__le32 desc2;
	u32 status;
	u16 next;
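
	/* Nothing to reap unless the RX-done interrupt is actually pending */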
	status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
	if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
		goto rx_done;

	pc = hsdma->pc;
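
	/*
	 * Walk the RX ring from the last reclaimed slot, reclaiming every
	 * PD the hardware has marked done, at most one full ring per pass.
	 */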
	while (i--) {
		next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
					       MTK_DMA_SIZE);
		rxd = &pc->ring.rxd[next];
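
		/* Stop at the first PD the hardware has not completed yet */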
		desc2 = READ_ONCE(rxd->desc2);
		if (!(desc2 & hsdma->soc->ddone))
			break;

		cb = &pc->ring.cb[next];
		if (unlikely(!cb->vd)) {
			dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
			break;
		}
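
		/* Charge the completed chunk against the owning VD's residue */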
		hvd = to_hsdma_vdesc(cb->vd);
		hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(desc2);
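
		/* If this PD was the VD's last one, the whole VD is done */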
		if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
			hvc = to_hsdma_vchan(cb->vd->tx.chan);

			spin_lock(&hvc->vc.lock);

			list_del(&cb->vd->node);

			vchan_cookie_complete(cb->vd);

			if (hvc->issue_synchronize &&
			    list_empty(&hvc->desc_hw_processing)) {
				complete(&hvc->issue_completion);
				hvc->issue_synchronize = false;
			}
			spin_unlock(&hvc->vc.lock);

			cb->flag = 0;
		}
		cb->vd = NULL;
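
		/* Scrub the PD and hand the slot back to the ring */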
		WRITE_ONCE(rxd->desc1, 0);
		WRITE_ONCE(rxd->desc2, 0);
		pc->ring.cur_rptr = next;

		atomic_inc(&pc->nr_free);
	}
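
	/* Ensure the slot scrubbing is visible before moving RX_CPU */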
	wmb();
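
	/* Return the reclaimed slots to the hardware */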
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
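
	/*
	 * Acknowledge the interrupt only once the ring is fully drained, so
	 * the hardware raises it again if completed PDs are still pending.
	 */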
	if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
		mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);
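
	/* With slots free again, push any VDs still waiting on the VCs */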
	for (i = 0; i < hsdma->dma_requests; i++) {
		hvc = &hsdma->vc[i];
		spin_lock(&hvc->vc.lock);
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);
		spin_unlock(&hvc->vc.lock);
	}

rx_done:
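	/* Re-enable the RX-done interrupt the IRQ handler masked */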
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
}

static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
{
	struct mtk_hsdma_device *hsdma = devid;
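
	/*
	 * Mask the RX-done interrupt while the ring is reaped;
	 * mtk_hsdma_free_rooms_in_ring() re-enables it before returning.
	 */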
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	mtk_hsdma_free_rooms_in_ring(hsdma);

	return IRQ_HANDLED;
}
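
/*
 * Look up a cookie first among the VDs the hardware is processing, then
 * among those issued but not yet on the hardware.
 */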
static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &hvc->desc_hw_processing, node)
		if (vd->tx.cookie == cookie)
			return vd;

	list_for_each_entry(vd, &hvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}

static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct mtk_hsdma_vdesc *hvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&hvc->vc.lock, flags);
	vd = mtk_hsdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&hvc->vc.lock, flags);

	if (vd) {
		hvd = to_hsdma_vdesc(vd);
		bytes = hvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}

static void mtk_hsdma_issue_pending(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	unsigned long flags;

	spin_lock_irqsave(&hvc->vc.lock, flags);

	if (vchan_issue_pending(&hvc->vc))
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);

	spin_unlock_irqrestore(&hvc->vc.lock, flags);
}

static struct dma_async_tx_descriptor *
mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_hsdma_vdesc *hvd;

	hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
	if (!hvd)
		return NULL;

	hvd->len = len;
	hvd->residue = len;
	hvd->src = src;
	hvd->dest = dest;

	return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
}

static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);
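
	/* Nothing on @head has touched the hardware; free it all directly */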
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void mtk_hsdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	bool sync_needed = false;
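
	/*
	 * If the VC still has VDs on the hardware, ask the completion path
	 * to signal issue_completion once desc_hw_processing drains, then
	 * wait for it.
	 */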
	spin_lock(&hvc->vc.lock);
	if (!list_empty(&hvc->desc_hw_processing)) {
		hvc->issue_synchronize = true;
		sync_needed = true;
	}
	spin_unlock(&hvc->vc.lock);

	if (sync_needed)
		wait_for_completion(&hvc->issue_completion);
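
	/*
	 * Every VD the hardware held has gone through
	 * vchan_cookie_complete() by now, so nothing should remain in
	 * desc_hw_processing.
	 */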
	WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
		  "Desc pending still in list desc_hw_processing\n");
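
	/* Flush the completed-descriptor tasklet so the VDs really get freed */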
	vchan_synchronize(&hvc->vc);

	WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}

static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
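	/* First, free every descriptor that has not reached the hardware */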
	mtk_hsdma_free_inactive_desc(c);
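
	/*
	 * Then wait out the VDs the hardware is still processing and free
	 * them once they complete.
	 */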
	mtk_hsdma_free_active_desc(c);

	return 0;
}

static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	int err;
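
	/*
	 * The single PC is allocated lazily by the first VC that needs it
	 * and then shared, refcounted, by every VC after that.
	 */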
	if (!refcount_read(&hsdma->pc_refcnt)) {
		err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
		if (err)
			return err;
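
		/*
		 * refcount_inc() on a zero refcount is not allowed, so the
		 * first user initializes it with refcount_set().
		 */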
		refcount_set(&hsdma->pc_refcnt, 1);
	} else {
		refcount_inc(&hsdma->pc_refcnt);
	}

	return 0;
}

static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
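
	/* Terminate everything left on this VC before releasing the PC */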
	mtk_hsdma_terminate_all(c);
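
	/* The last VC to go frees the shared PC */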
	if (!refcount_dec_and_test(&hsdma->pc_refcnt))
		return;

	mtk_hsdma_free_pchan(hsdma, hsdma->pc);
}

static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
{
	int err;

	pm_runtime_enable(hsdma2dev(hsdma));
	pm_runtime_get_sync(hsdma2dev(hsdma));

	err = clk_prepare_enable(hsdma->clk);
	if (err) {
		pm_runtime_put_sync(hsdma2dev(hsdma));
		pm_runtime_disable(hsdma2dev(hsdma));
		return err;
	}

	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);

	return 0;
}

static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
{
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);

	clk_disable_unprepare(hsdma->clk);

	pm_runtime_put_sync(hsdma2dev(hsdma));
	pm_runtime_disable(hsdma2dev(hsdma));

	return 0;
}

static const struct mtk_hsdma_soc mt7623_soc = {
	.ddone = BIT(31),
	.ls0 = BIT(30),
};

static const struct mtk_hsdma_soc mt7622_soc = {
	.ddone = BIT(15),
	.ls0 = BIT(14),
};

static const struct of_device_id mtk_hsdma_match[] = {
	{ .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc },
	{ .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hsdma_match);

static int mtk_hsdma_probe(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma;
	struct mtk_hsdma_vchan *vc;
	struct dma_device *dd;
	struct resource *res;
	int i, err;

	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
	if (!hsdma)
		return -ENOMEM;

	dd = &hsdma->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hsdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsdma->base))
		return PTR_ERR(hsdma->base);

	hsdma->soc = of_device_get_match_data(&pdev->dev);
	if (!hsdma->soc) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
	if (IS_ERR(hsdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(hsdma->clk);
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "No irq resource for %s\n",
			dev_name(&pdev->dev));
		return -EINVAL;
	}
	hsdma->irq = res->start;

	refcount_set(&hsdma->pc_refcnt, 0);
	spin_lock_init(&hsdma->lock);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
	dd->device_tx_status = mtk_hsdma_tx_status;
	dd->device_issue_pending = mtk_hsdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_hsdma_terminate_all;
	dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &hsdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as the default for the missing dma-requests property\n",
			 MTK_HSDMA_NR_VCHANS);
	}

	hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
				 sizeof(*hsdma->pc), GFP_KERNEL);
	if (!hsdma->pc)
		return -ENOMEM;

	hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
				 sizeof(*hsdma->vc), GFP_KERNEL);
	if (!hsdma->vc)
		return -ENOMEM;

	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];
		vc->vc.desc_free = mtk_hsdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
		INIT_LIST_HEAD(&vc->desc_hw_processing);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek HSDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	mtk_hsdma_hw_init(hsdma);

	err = devm_request_irq(&pdev->dev, hsdma->irq,
			       mtk_hsdma_irq, 0,
			       dev_name(&pdev->dev), hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"request_irq failed with err %d\n", err);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, hsdma);

	dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return err;
}

static int mtk_hsdma_remove(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
	struct mtk_hsdma_vchan *vc;
	int i;
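
	/* Kill each VC's tasklet and unlink it from the dmaengine device */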
	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}
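
	/* Disable DMA interrupts */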
	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
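
	/* Wait for any running IRQ handler to finish */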
	synchronize_irq(hsdma->irq);
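
	/* Disable the hardware */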
	mtk_hsdma_hw_deinit(hsdma);

	dma_async_device_unregister(&hsdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver mtk_hsdma_driver = {
	.probe = mtk_hsdma_probe,
	.remove = mtk_hsdma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = mtk_hsdma_match,
	},
};
module_platform_driver(mtk_hsdma_driver);

MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("GPL v2");