This source file includes the following definitions:
- nbpf_chan_read
- nbpf_chan_write
- nbpf_read
- nbpf_write
- nbpf_chan_halt
- nbpf_status_get
- nbpf_status_ack
- nbpf_error_get
- nbpf_error_get_channel
- nbpf_error_clear
- nbpf_start
- nbpf_chan_prepare
- nbpf_chan_prepare_default
- nbpf_chan_configure
- nbpf_xfer_ds
- nbpf_xfer_size
- nbpf_prep_one
- nbpf_bytes_left
- nbpf_configure
- nbpf_issue_pending
- nbpf_tx_status
- nbpf_tx_submit
- nbpf_desc_page_alloc
- nbpf_desc_put
- nbpf_scan_acked
- nbpf_desc_get
- nbpf_chan_idle
- nbpf_pause
- nbpf_terminate_all
- nbpf_config
- nbpf_prep_sg
- nbpf_prep_memcpy
- nbpf_prep_slave_sg
- nbpf_alloc_chan_resources
- nbpf_free_chan_resources
- nbpf_of_xlate
- nbpf_chan_tasklet
- nbpf_chan_irq
- nbpf_err_irq
- nbpf_chan_probe
- nbpf_probe
- nbpf_remove
- nbpf_runtime_suspend
- nbpf_runtime_resume

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

/* Per-channel register block: one 0x40-byte window per channel */
#define NBPF_REG_CHAN_OFFSET	0
#define NBPF_REG_CHAN_SIZE	0x40

/* Channel Current Transaction Byte register */
#define NBPF_CHAN_CUR_TR_BYTE	0x20

/* Channel Status register and bits */
#define NBPF_CHAN_STAT	0x24
#define NBPF_CHAN_STAT_EN	1
#define NBPF_CHAN_STAT_TACT	4
#define NBPF_CHAN_STAT_ERR	0x10
#define NBPF_CHAN_STAT_END	0x20
#define NBPF_CHAN_STAT_TC	0x40
#define NBPF_CHAN_STAT_DER	0x400

/* Channel Control register and bits */
#define NBPF_CHAN_CTRL	0x28
#define NBPF_CHAN_CTRL_SETEN	1
#define NBPF_CHAN_CTRL_CLREN	2
#define NBPF_CHAN_CTRL_STG	4
#define NBPF_CHAN_CTRL_SWRST	8
#define NBPF_CHAN_CTRL_CLRRQ	0x10
#define NBPF_CHAN_CTRL_CLREND	0x20
#define NBPF_CHAN_CTRL_CLRTC	0x40
#define NBPF_CHAN_CTRL_SETSUS	0x100
#define NBPF_CHAN_CTRL_CLRSUS	0x200

/* Channel Configuration register and bits */
#define NBPF_CHAN_CFG	0x2c
#define NBPF_CHAN_CFG_SEL	7
#define NBPF_CHAN_CFG_REQD	8
#define NBPF_CHAN_CFG_LOEN	0x10
#define NBPF_CHAN_CFG_HIEN	0x20
#define NBPF_CHAN_CFG_LVL	0x40
#define NBPF_CHAN_CFG_AM	0x700
#define NBPF_CHAN_CFG_SDS	0xf000
#define NBPF_CHAN_CFG_DDS	0xf0000
#define NBPF_CHAN_CFG_SAD	0x100000
#define NBPF_CHAN_CFG_DAD	0x200000
#define NBPF_CHAN_CFG_TM	0x400000
#define NBPF_CHAN_CFG_DEM	0x1000000
#define NBPF_CHAN_CFG_TCM	0x2000000
#define NBPF_CHAN_CFG_SBE	0x8000000
#define NBPF_CHAN_CFG_RSEL	0x10000000
#define NBPF_CHAN_CFG_RSW	0x20000000
#define NBPF_CHAN_CFG_REN	0x40000000
#define NBPF_CHAN_CFG_DMS	0x80000000

#define NBPF_CHAN_NXLA	0x38
#define NBPF_CHAN_CRLA	0x3c

/* Link Header field */
#define NBPF_HEADER_LV	1
#define NBPF_HEADER_LE	2
#define NBPF_HEADER_WBD	4
#define NBPF_HEADER_DIM	8

/* Global registers */
#define NBPF_CTRL	0x300
#define NBPF_CTRL_PR	1
#define NBPF_CTRL_LVINT	2

#define NBPF_DSTAT_ER	0x314
#define NBPF_DSTAT_END	0x318

#define NBPF_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct nbpf_config {
	int num_channels;
	int buffer_size;
};

/*
 * Hardware transfer descriptor: this is the exact layout the DMAC fetches
 * from memory via the NXLA link address register, hence __packed.
 */
struct nbpf_link_reg {
	u32 header;
	u32 src_addr;
	u32 dst_addr;
	u32 transaction_size;
	u32 config;
	u32 interval;
	u32 extension;
	u32 next;
} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

struct nbpf_link_desc {
	struct nbpf_link_reg *hwdesc;
	dma_addr_t hwdesc_dma_addr;
	struct nbpf_desc *desc;
	struct list_head node;
};

/*
 * Software DMA descriptor in dmaengine terms: one such descriptor carries a
 * whole SG list as a chain of hardware link descriptors on the @sg list.
 */
struct nbpf_desc {
	struct dma_async_tx_descriptor async_tx;
	bool user_wait;
	size_t length;
	struct nbpf_channel *chan;
	struct list_head sg;
	struct list_head node;
};

/* Descriptors are allocated a page at a time */
#define NBPF_SEGMENTS_PER_DESC 4
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
	(sizeof(struct nbpf_desc) +					\
	 NBPF_SEGMENTS_PER_DESC *					\
	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)

struct nbpf_desc_page {
	struct list_head node;
	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
};
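
/*
 * Sizing note (a restatement of the macro arithmetic above, not additional
 * constraints): each struct nbpf_desc in a page is paired with
 * NBPF_SEGMENTS_PER_DESC link descriptors and as many hardware descriptors,
 * so the per-descriptor footprint is sizeof(struct nbpf_desc) +
 * NBPF_SEGMENTS_PER_DESC * (sizeof(struct nbpf_link_desc) +
 * sizeof(struct nbpf_link_reg)). NBPF_DESCS_PER_PAGE divides the page space
 * remaining after the list_head by that footprint, and the BUILD_BUG_ON() in
 * nbpf_probe() verifies that struct nbpf_desc_page indeed fits in PAGE_SIZE.
 */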

/**
 * struct nbpf_channel - one DMAC channel
 * @dma_chan:		standard dmaengine channel object
 * @tasklet:		channel specific tasklet, used for callbacks
 * @base:		register address base of this channel's window
 * @nbpf:		DMAC
 * @name:		IRQ name
 * @irq:		IRQ number
 * @slave_src_addr:	slave source address (DEV_TO_MEM)
 * @slave_src_width:	slave source bus width, encoded for NBPF_CHAN_CFG
 * @slave_src_burst:	slave source burst size, encoded for NBPF_CHAN_CFG
 * @slave_dst_addr:	slave destination address (MEM_TO_DEV)
 * @slave_dst_width:	slave destination bus width, encoded for NBPF_CHAN_CFG
 * @slave_dst_burst:	slave destination burst size, encoded for NBPF_CHAN_CFG
 * @terminal:		DMA terminal, assigned to this channel
 * @dmarq_cfg:		DMA request line configuration for NBPF_CHAN_CFG
 * @flags:		configuration flags from DT
 * @lock:		protects the descriptor lists
 * @free_links:		list of free link descriptors
 * @free:		list of free descriptors
 * @queued:		list of submitted, not yet issued descriptors
 * @active:		list of descriptors, scheduled for processing
 * @done:		list of completed descriptors, awaiting post-processing
 * @desc_page:		list of allocated descriptor pages
 * @running:		descriptor of the currently running transaction
 * @paused:		is this channel paused?
 */
struct nbpf_channel {
	struct dma_chan dma_chan;
	struct tasklet_struct tasklet;
	void __iomem *base;
	struct nbpf_device *nbpf;
	char name[16];
	int irq;
	dma_addr_t slave_src_addr;
	size_t slave_src_width;
	size_t slave_src_burst;
	dma_addr_t slave_dst_addr;
	size_t slave_dst_width;
	size_t slave_dst_burst;
	unsigned int terminal;
	u32 dmarq_cfg;
	unsigned long flags;
	spinlock_t lock;
	struct list_head free_links;
	struct list_head free;
	struct list_head queued;
	struct list_head active;
	struct list_head done;
	struct list_head desc_page;
	struct nbpf_desc *running;
	bool paused;
};

struct nbpf_device {
	struct dma_device dma_dev;
	void __iomem *base;
	u32 max_burst_mem_read;
	u32 max_burst_mem_write;
	struct clk *clk;
	const struct nbpf_config *config;
	unsigned int eirq;
	struct nbpf_channel chan[];
};

enum nbpf_model {
	NBPF1B4,
	NBPF1B8,
	NBPF1B16,
	NBPF4B4,
	NBPF4B8,
	NBPF4B16,
	NBPF8B4,
	NBPF8B8,
	NBPF8B16,
};

static struct nbpf_config nbpf_cfg[] = {
	[NBPF1B4] = {
		.num_channels = 1,
		.buffer_size = 4,
	},
	[NBPF1B8] = {
		.num_channels = 1,
		.buffer_size = 8,
	},
	[NBPF1B16] = {
		.num_channels = 1,
		.buffer_size = 16,
	},
	[NBPF4B4] = {
		.num_channels = 4,
		.buffer_size = 4,
	},
	[NBPF4B8] = {
		.num_channels = 4,
		.buffer_size = 8,
	},
	[NBPF4B16] = {
		.num_channels = 4,
		.buffer_size = 16,
	},
	[NBPF8B4] = {
		.num_channels = 8,
		.buffer_size = 4,
	},
	[NBPF8B8] = {
		.num_channels = 8,
		.buffer_size = 8,
	},
	[NBPF8B16] = {
		.num_channels = 8,
		.buffer_size = 16,
	},
};
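
/*
 * Reading the table above: the model names encode the hardware configuration
 * as NBPF<c>B<b>, i.e. <c> channels with a <b>-byte buffer. NBPF4B8, for
 * example, is the 4-channel variant with an 8-byte buffer, matched by the
 * "renesas,nbpfaxi64dmac4b8" compatible string further down.
 */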

#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)

/*		Hardware-specific part		*/

static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}

static void nbpf_chan_halt(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
}

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}

static u32 nbpf_error_get(struct nbpf_device *nbpf)
{
	return nbpf_read(nbpf, NBPF_DSTAT_ER);
}

static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{
	return nbpf->chan + __ffs(error);
}

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int i;

	/* Stop the channel, make sure DMA has been aborted */
	nbpf_chan_halt(chan);

	for (i = 1000; i; i--) {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	}

	if (!i)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n", __func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}

static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);

	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	/* Software trigger for MEMCPY - only MEMCPY uses the block mode */
	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}

static void nbpf_chan_prepare(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
		chan->terminal;
}

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	/* Don't output DMAACK */
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
	/*
	 * We assume, that only the link mode and DMA request line
	 * configuration have to be set in the configuration register manually;
	 * everything else is taken from the hardware link descriptors.
	 */
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}

static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{
	int max_burst = nbpf->config->buffer_size * 8;

	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
		switch (direction) {
		case DMA_MEM_TO_MEM:
			max_burst = min_not_zero(nbpf->max_burst_mem_read,
						 nbpf->max_burst_mem_write);
			break;
		case DMA_MEM_TO_DEV:
			if (nbpf->max_burst_mem_read)
				max_burst = nbpf->max_burst_mem_read;
			break;
		case DMA_DEV_TO_MEM:
			if (nbpf->max_burst_mem_write)
				max_burst = nbpf->max_burst_mem_write;
			break;
		case DMA_DEV_TO_DEV:
		default:
			break;
		}
	}

	/* Maximum supported bursts depend on the buffer size */
	return min_t(int, __ffs(size), ilog2(max_burst));
}
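
/*
 * Worked example: nbpf_xfer_ds() returns a power-of-two exponent, i.e. the
 * encoding used by the SDS/DDS fields. Assuming buffer_size = 16 and no DT
 * burst limits, max_burst is 16 * 8 = 128, so ilog2(max_burst) = 7. A
 * transfer of size = 96 bytes (0b1100000) has __ffs(size) = 5, giving
 * min(5, 7) = 5, i.e. 32-byte accesses - the largest power of two that both
 * divides the transfer size and fits the hardware buffer.
 */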

static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{
	size_t size;

	if (!burst)
		burst = 1;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		size = 8 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		size = 4 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		size = 2 * burst;
		break;

	default:
		pr_warn("%s(): invalid bus width %u\n", __func__, width);
		fallthrough;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		size = burst;
	}

	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}

/*
 * Prepare one hardware link descriptor for a single contiguous chunk of a
 * transfer: fill in addresses, size and the configuration word, then sync the
 * descriptor out to memory for the DMAC to fetch.
 */
static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{
	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
	struct nbpf_desc *desc = ldesc->desc;
	struct nbpf_channel *chan = desc->chan;
	struct device *dev = chan->dma_chan.device->dev;
	size_t mem_xfer, slave_xfer;
	bool can_burst;

	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
		(last ? NBPF_HEADER_LE : 0);

	hwdesc->src_addr = src;
	hwdesc->dst_addr = dst;
	hwdesc->transaction_size = size;

	/*
	 * Pick the widest memory-side access that still divides the transfer
	 * size; the slave side is then additionally limited by its own width
	 * and burst capabilities.
	 */
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

	switch (direction) {
	case DMA_DEV_TO_MEM:
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);
		/*
		 * Without burst support on the slave side the memory access
		 * must not exceed the slave burst size.
		 */
		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;

		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
			NBPF_CHAN_CFG_SBE;
		break;

	case DMA_MEM_TO_DEV:
		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
				 chan->slave_dst_burst : chan->slave_dst_width);
		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
		break;

	case DMA_MEM_TO_MEM:
		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
		break;

	default:
		return -EINVAL;
	}

	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
		NBPF_CHAN_CFG_DMS;

	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);

	return 0;
}

static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{
	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
}

static void nbpf_configure(struct nbpf_device *nbpf)
{
	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
}

/*		Generic part		*/

/* DMA ENGINE functions */
static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);

	if (state) {
		dma_cookie_t running;
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

		if (cookie == running) {
			state->residue = nbpf_bytes_left(chan);
			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
				state->residue);
		} else if (status == DMA_IN_PROGRESS) {
			/* Still queued or active: the whole length is left */
			struct nbpf_desc *desc;
			bool found = false;

			list_for_each_entry(desc, &chan->active, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

			if (!found)
				list_for_each_entry(desc, &chan->queued, node)
					if (desc->async_tx.cookie == cookie) {
						found = true;
						break;
					}

			state->residue = found ? desc->length : 0;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

	if (chan->paused)
		status = DMA_PAUSED;

	return status;
}

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}

static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{
	struct dma_chan *dchan = &chan->dma_chan;
	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct nbpf_link_desc *ldesc;
	struct nbpf_link_reg *hwdesc;
	struct nbpf_desc *desc;
	LIST_HEAD(head);
	LIST_HEAD(lhead);
	int i;
	struct device *dev = dchan->device->dev;

	if (!dpage)
		return -ENOMEM;

	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));

	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
	     i < ARRAY_SIZE(dpage->ldesc);
	     i++, ldesc++, hwdesc++) {
		ldesc->hwdesc = hwdesc;
		list_add_tail(&ldesc->node, &lhead);
		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);

		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
			hwdesc, &ldesc->hwdesc_dma_addr);
	}

	for (i = 0, desc = dpage->desc;
	     i < ARRAY_SIZE(dpage->desc);
	     i++, desc++) {
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
		desc->async_tx.tx_submit = nbpf_tx_submit;
		desc->chan = chan;
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
	}

	/*
	 * This function cannot be called from interrupt context, so, no need
	 * to save flags
	 */
	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);

	return ARRAY_SIZE(dpage->desc);
}

static void nbpf_desc_put(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void nbpf_scan_acked(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(desc, tmp, &chan->done, node)
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

/*
 * We have to allocate descriptors with the channel lock dropped. This means,
 * before we re-acquire the lock, the freshly allocated descriptors can be
 * taken already, so we have to re-check after re-acquiring the lock and
 * possibly retry.
 */
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{
	struct nbpf_desc *desc = NULL;
	struct nbpf_link_desc *ldesc, *prev = NULL;

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		int i = 0, ret;

		if (list_empty(&chan->free)) {
			/* No more free descriptors */
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);

		do {
			if (list_empty(&chan->free_links)) {
				/* No more free link descriptors */
				spin_unlock_irq(&chan->lock);
				ret = nbpf_desc_page_alloc(chan);
				if (ret < 0) {
					nbpf_desc_put(desc);
					return NULL;
				}
				spin_lock_irq(&chan->lock);
				continue;
			}

			ldesc = list_first_entry(&chan->free_links,
					struct nbpf_link_desc, node);
			ldesc->desc = desc;
			if (prev)
				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;

			prev = ldesc;
			list_move_tail(&ldesc->node, &desc->sg);

			i++;
		} while (i < len);
	} while (!desc);

	/* Terminate the hardware descriptor chain */
	prev->hwdesc->next = 0;

	spin_unlock_irq(&chan->lock);

	return desc;
}

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

static int nbpf_pause(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->paused = true;
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
	/* Also clear the channel enable bit */
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

	return 0;
}

static int nbpf_terminate_all(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
	dev_dbg(dchan->device->dev, "Terminating\n");

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	return 0;
}

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	/*
	 * The slave address and transfer parameters are simply cached here,
	 * pre-encoded for NBPF_CHAN_CFG; they are applied per descriptor in
	 * nbpf_prep_one().
	 */
	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}

static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct nbpf_link_desc *ldesc;
	struct scatterlist *mem_sg;
	struct nbpf_desc *desc;
	bool inc_src, inc_dst;
	size_t data_len = 0;
	int i = 0;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		mem_sg = dst_sg;
		inc_src = false;
		inc_dst = true;
		break;

	case DMA_MEM_TO_DEV:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = false;
		break;

	default:
	case DMA_MEM_TO_MEM:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = true;
	}

	desc = nbpf_desc_get(chan, len);
	if (!desc)
		return NULL;

	desc->async_tx.flags = flags;
	desc->async_tx.cookie = -EBUSY;
	desc->user_wait = false;

	/*
	 * This is a private descriptor list, and we own the descriptor. No
	 * need to lock.
	 */
	list_for_each_entry(ldesc, &desc->sg, node) {
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
		if (ret < 0) {
			nbpf_desc_put(desc);
			return NULL;
		}
		data_len += sg_dma_len(mem_sg);
		if (inc_src)
			src_sg = sg_next(src_sg);
		if (inc_dst)
			dst_sg = sg_next(dst_sg);
		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
		i++;
	}

	desc->length = data_len;

	/* The user must eventually ack the descriptor to allow recycling it */
	return &desc->async_tx;
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = len;
	sg_dma_len(&src_sg) = len;

	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
		__func__, len, &src, &dst);

	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}
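
/*
 * For reference, a minimal dmaengine client sequence exercising the slave
 * path above could look as follows (a sketch using the generic dmaengine
 * API; "rx", fifo_phys_addr and the config values are placeholders, error
 * handling omitted):
 *
 *	struct dma_chan *dch = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(dch, &cfg);
 *	tx = dmaengine_prep_slave_sg(dch, sgl, sg_len, DMA_DEV_TO_MEM,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(dch);
 */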

static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	int ret;

	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);

	ret = nbpf_desc_page_alloc(chan);
	if (ret < 0)
		return ret;

	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
		chan->terminal);

	nbpf_chan_configure(chan);

	return ret;
}

static void nbpf_free_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct nbpf_desc_page *dpage, *tmp;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);
	/* Clean up, in case the channel is re-used for MEMCPY after slave DMA */
	nbpf_chan_prepare_default(chan);

	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
		struct nbpf_link_desc *ldesc;
		int i;

		list_del(&dpage->node);
		for (i = 0, ldesc = dpage->ldesc;
		     i < ARRAY_SIZE(dpage->ldesc);
		     i++, ldesc++)
			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
		free_page((unsigned long)dpage);
	}
}

static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;
	struct nbpf_channel *chan;

	if (dma_spec->args_count != 2)
		return NULL;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
		dma_spec->np);

	chan = nbpf_to_chan(dchan);

	chan->terminal = dma_spec->args[0];
	chan->flags = dma_spec->args[1];

	nbpf_chan_prepare(chan);
	nbpf_chan_configure(chan);

	return dchan;
}
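
/*
 * The two-cell specifier decoded above selects the DMA terminal (request
 * line) in the first cell and carries NBPF_SLAVE_RQ_* flags from
 * <dt-bindings/dma/nbpfaxi.h> in the second. A hypothetical consumer node
 * (terminal number 17 chosen arbitrarily) could therefore reference the
 * controller as:
 *
 *	dmas = <&dmac 17 NBPF_SLAVE_RQ_LEVEL>;
 *	dma-names = "rx";
 */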

static void nbpf_chan_tasklet(unsigned long data)
{
	struct nbpf_channel *chan = (struct nbpf_channel *)data;
	struct nbpf_desc *desc, *tmp;
	struct dmaengine_desc_callback cb;

	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);

		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
			if (!desc->user_wait) {
				/* Newly completed descriptor, have to process */
				found = true;
				break;
			} else if (async_tx_test_ack(&desc->async_tx)) {
				/*
				 * This descriptor was waiting for a user ACK,
				 * it can be recycled now.
				 */
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
				nbpf_desc_put(desc);
				recycling = true;
				break;
			}
		}

		if (recycling)
			continue;

		if (!found) {
			/* This can happen if TERMINATE_ALL has been called */
			spin_unlock_irq(&chan->lock);
			break;
		}

		dma_cookie_complete(&desc->async_tx);

		/*
		 * If the descriptor is already acked, recycle it immediately,
		 * otherwise mark it as waiting for a user ACK.
		 */
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			must_put = true;
		} else {
			desc->user_wait = true;
			must_put = false;
		}

		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		/* Invoke the callback with the lock released */
		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);

		if (must_put)
			nbpf_desc_put(desc);
	}
}

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	} else {
		ret = IRQ_HANDLED;
		bh = true;
	}

	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}

static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
		/* On error: abort all queued transfers, no callback */
		nbpf_error_clear(chan);
		nbpf_chan_idle(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}

static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{
	struct dma_device *dma_dev = &nbpf->dma_dev;
	struct nbpf_channel *chan = nbpf->chan + n;
	int ret;

	chan->nbpf = nbpf;
	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			       nbpf_chan_irq, IRQF_SHARED,
			       chan->name, chan);
	if (ret < 0)
		return ret;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);

	return 0;
}

static const struct of_device_id nbpf_match[] = {
	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
	{}
};
MODULE_DEVICE_TABLE(of, nbpf_match);

static int nbpf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct nbpf_device *nbpf;
	struct dma_device *dma_dev;
	struct resource *iomem, *irq_res;
	const struct nbpf_config *cfg;
	int num_channels;
	int ret, irq, eirq, i;
	int irqbuf[9];	/* maximum 8 channels + error IRQ */
	unsigned int irqs = 0;

	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);

	/* DT only */
	if (!np)
		return -ENODEV;

	cfg = of_device_get_match_data(dev);
	num_channels = cfg->num_channels;

	nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
			    GFP_KERNEL);
	if (!nbpf)
		return -ENOMEM;

	dma_dev = &nbpf->dma_dev;
	dma_dev->dev = dev;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nbpf->base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	of_property_read_u32(np, "max-burst-mem-read",
			     &nbpf->max_burst_mem_read);
	of_property_read_u32(np, "max-burst-mem-write",
			     &nbpf->max_burst_mem_write);

	nbpf->config = cfg;

	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res)
			break;

		for (irq = irq_res->start; irq <= irq_res->end;
		     irq++, irqs++)
			irqbuf[irqs] = irq;
	}

	/*
	 * 3 IRQ resource schemes are supported:
	 * 1. 1 shared IRQ for error and all channels
	 * 2. 2 IRQs: one for error and one shared for all channels
	 * 3. 1 IRQ for error and an own IRQ for each channel
	 */
	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
		return -ENXIO;

	if (irqs == 1) {
		eirq = irqbuf[0];

		for (i = 0; i < num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			for (i = 0, chan = nbpf->chan; i <= num_channels;
			     i++, chan++) {
				/* Skip the error IRQ */
				if (irqbuf[i] == eirq)
					i++;
				chan->irq = irqbuf[i];
			}

			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			/* 2 IRQs and more than one channel */
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			for (i = 0; i < num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}

	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
	if (ret < 0)
		return ret;
	nbpf->eirq = eirq;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Create DMA channels */
	for (i = 0; i < num_channels; i++) {
		ret = nbpf_chan_probe(nbpf, i);
		if (ret < 0)
			return ret;
	}

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= nbpf_alloc_chan_resources;
	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
	dma_dev->device_tx_status = nbpf_tx_status;
	dma_dev->device_issue_pending = nbpf_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
	dma_dev->device_config = nbpf_config;
	dma_dev->device_pause = nbpf_pause;
	dma_dev->device_terminate_all = nbpf_terminate_all;

	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
	if (ret < 0)
		return ret;

	nbpf_configure(nbpf);

	ret = dma_async_device_register(dma_dev);
	if (ret < 0)
		goto e_clk_off;

	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
	if (ret < 0)
		goto e_dma_dev_unreg;

	return 0;

e_dma_dev_unreg:
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);

	return ret;
}

static int nbpf_remove(struct platform_device *pdev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);

	return 0;
}

static const struct platform_device_id nbpf_ids[] = {
	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
	{},
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	clk_disable_unprepare(nbpf->clk);
	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	return clk_prepare_enable(nbpf->clk);
}
#endif

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};

static struct platform_driver nbpf_driver = {
	.driver = {
		.name = "dma-nbpf",
		.of_match_table = nbpf_match,
		.pm = &nbpf_pm_ops,
	},
	.id_table = nbpf_ids,
	.probe = nbpf_probe,
	.remove = nbpf_remove,
};

module_platform_driver(nbpf_driver);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
MODULE_LICENSE("GPL v2");