This source file includes the following definitions:
- cpu_drain_writebuffer
- cppi_bd_alloc
- cppi_bd_free
- cppi_reset_rx
- cppi_reset_tx
- cppi_pool_init
- cppi_pool_free
- cppi_controller_start
- cppi_controller_stop
- core_rxirq_disable
- core_rxirq_enable
- cppi_channel_allocate
- cppi_channel_release
- cppi_dump_rx
- cppi_dump_tx
- cppi_rndis_update
- cppi_dump_rxbd
- cppi_dump_rxq
- cppi_autoreq_update
- cppi_next_tx_segment
- cppi_next_rx_segment
- cppi_channel_program
- cppi_rx_scan
- cppi_interrupt
- cppi_dma_controller_create
- cppi_dma_controller_destroy
- cppi_channel_abort
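
/*
 * This file implements a DMA interface for the MUSB controller using
 * TI's CPPI DMA engine, as wired up on DaVinci parts.  Each
 * non-control endpoint direction gets its own CPPI channel, driven
 * through queues of buffer descriptors (BDs) held in per-channel
 * state RAM.
 */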

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
#include "davinci.h"
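
/* Every channel preallocates its own pool of buffer descriptors; with
 * 64-byte full-speed bulk packets, 64 BDs cover 4KB per DMA segment.
 */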
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* on ARM926 an explicit CP15 "drain write buffer" is used so
	 * that pending buffered writes reach memory before the DMA
	 * engine is kicked
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

/* pop a BD off the channel's freelist */
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

/* push a BD back onto the channel's freelist */
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel, then set the
 * completion pointer as the caller requires
 */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);

	musb_writel(&tx->tx_complete, 0, ptr);
}

static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all of the channel's bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static void cppi_controller_start(struct cppi *controller)
{
	void __iomem	*tibase;
	int		i;

	/* initialize channel bookkeeping */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel state RAM */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
}
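
/*
 * Stop DMA controller.
 *
 * De-init the DMA controller as necessary.
 */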
static void cppi_controller_stop(struct cppi *controller)
{
	void __iomem	*tibase;
	int		i;
	struct musb	*musb;

	musb = controller->controller.musb;

	tibase = controller->tibase;
	/* disable individual cppi channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	musb_dbg(musb, "Tearing down RX and TX Channels");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* TX teardown must complete before TX CPPI can be disabled, so
	 * only disable tx/rx cppi after the channels are cleaned up
	 */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
}
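
/* While a dma channel is allocated, we only want the core irqs active
 * for fault reports; otherwise we'd get irqs that we don't care about.
 *
 * NOTE: the docs don't say either way, but irq masking **enables** irqs.
 */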
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
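
/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */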
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;
	musb = c->musb;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle; for RX, mask
	 * the core irq until we need it
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* re-allocation is bad housekeeping by the caller, but for now
	 * just report it rather than treating it as an error
	 */
	if (cppi_ch->hw_ep)
		musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */
	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		musb_dbg(c->controller->controller.musb,
			"releasing idle DMA channel %p", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trust it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"RX DMA%d%s: %d left, csr %04x, "
		"%08x H%08x S%08x C%08x, "
		"B%08x L%08x %08x .. %08x",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"TX DMA%d%s: csr %04x, "
		"H%08x S%08x C%08x %08x, "
		"F%08x L%08x .. %08x",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),

		musb_readl(&tx->tx_complete, 0)
		);
}

static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}

static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
	struct cppi_descriptor	*bd;

	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
}
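
/* NOTE: the DaVinci autoreq update below is compiled out unless
 * RNDIS_RX_IS_USABLE is defined; without it, this helper only makes
 * sure REQPKT gets set again between host-side RX segments.
 */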
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef RNDIS_RX_IS_USABLE
	u32	tmp;

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment, so re-arm it here
	 * whenever another segment of the same transfer is queued
	 */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);

			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
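
/*
 * CPPI TX:
 * ========
 * RNDIS mode can fit an entire transfer into one BD (and one IRQ)
 * when the length allows it; otherwise "transparent" mode queues up
 * to NUM_TXCHAN_BD maxpacket-size BDs per segment, and a larger
 * transfer continues segment by segment from the completion IRQ.
 */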
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram __iomem	*tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		if (length)
			n_bds = DIV_ROUND_UP(length, maxpacket);
		else
			n_bds = 1;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			(unsigned long long)addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */
	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
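
/*
 * CPPI RX is trickier than TX: to stay correct in the general case,
 * each RX segment normally covers just one USB packet (one BD), and
 * multi-BD segments are reserved for callers that treat short reads
 * as errors with their own recovery (mass storage, in practice).
 *
 * The module parameter below gates a heuristic that lets peripheral
 * side RX use the hardware "rndis" mode (one BD, one IRQ) when the
 * transfer length is a multiple of maxpacket.
 */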
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
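
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See the notes above on why multi-BD RX queues are only safe for
 * callers that recover from corrupted datastreams after short reads.
 */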
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
	struct cppi_descriptor	*d;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = DIV_ROUND_UP(length, maxpacket);
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* host mode: let cppi_autoreq_update() adjust the autoreq
	 * setting and re-arm REQPKT as needed before this segment starts
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%llx len %u %u/%u",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			(unsigned long long)addr, length,
			rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment; we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once.
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

	for (d = rx->head; d; d = d->next)
		cppi_dump_rxbd("S", d);

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
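
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: for RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery;
 *	for TX, ignored
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */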
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->controller.musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel? %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		musb_dbg(musb, "%cX DMA%d not allocated!",
			cppi_ch->transmit ? 'T' : 'R',
			cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up the first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
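
/* Scan the current RX BD queue: credit received packets to the
 * channel, ack processed BDs, and return true once the whole DMA
 * segment has completed.
 */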
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;
	struct musb			*musb = cppi->controller.musb;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)",
			(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE: when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue; CPPI seems to ignore
			 * those BDs even though OWN is still set
			 */
			completed = true;
			musb_dbg(musb, "rx short %d/%d (%d)",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; CPPI
		 * may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion
		 * pointer.  In such cases, stop scanning and wait for the
		 * irq, avoiding lost acks and states where BD ownership
		 * is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? (unsigned long long)
						rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* on the host side, re-arm REQPKT if the hardware dropped
		 * it so the rest of this segment can keep transferring
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->controller.musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
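
/* Handle CPPI completion interrupts: scan whichever TX and RX
 * channels have their masked status bits set, then write the EOI
 * register to re-arm the interrupt.
 */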
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If head is null, this could mean an abort interrupt
		 * that still needs to be acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, either queue the next one or
		 * report completion of the whole transfer
		 */
		if (completed) {
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* bigger transfer than we could fit in
				 * one set of linked BDs
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cppi_interrupt);
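
/* Instantiate a software object representing a DMA controller. */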
struct dma_controller *
cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq_byname(pdev, "dma");

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->controller.musb = musb;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* setup BufferPool for the BDs */
	controller->pool = dma_pool_create("cppi",
			controller->controller.musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			musb_dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	cppi_controller_start(controller);
	return &controller->controller;
}
EXPORT_SYMBOL_GPL(cppi_dma_controller_create);
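
/*
 *  Destroy a previously-instantiated DMA controller.
 */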
void cppi_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	cppi_controller_stop(cppi);

	if (cppi->irq)
		free_irq(cppi->irq, cppi->controller.musb);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
EXPORT_SYMBOL_GPL(cppi_dma_controller_destroy);
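
/* Context: controller irqlocked, endpoint selected */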
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;

		/* REVISIT put timeouts on these controller handshakes */
		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * transfer status...
		 */
		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1(bit 0 set)
		 *    (write back mode)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		cppi_ch->head = NULL;
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side does, and free its queued BDs
		 */
	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ...  we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->controller.musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->controller.musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for the current dma to finish (if not
		 * cleanup).  We can't use bit zero of stateram->rx_sop,
		 * since that refers to an entire "DMA packet" not just
		 * emptying the current fifo; most segments need several
		 * usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* and of course, leave the RX queue clean */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}