This source file includes the following definitions:
- chnenbl_ofs
- sdma_config_ownership
- sdma_enable_channel
- sdma_run_channel0
- sdma_load_script
- sdma_event_enable
- sdma_event_disable
- to_sdma_desc
- sdma_start_desc
- sdma_update_channel_loop
- mxc_sdma_handle_channel_normal
- sdma_int_handler
- sdma_get_pc
- sdma_load_context
- to_sdma_chan
- sdma_disable_channel
- sdma_channel_terminate_work
- sdma_disable_channel_async
- sdma_channel_synchronize
- sdma_set_watermarklevel_for_p2p
- sdma_config_channel
- sdma_set_channel_priority
- sdma_request_channel0
- sdma_alloc_bd
- sdma_free_bd
- sdma_desc_free
- sdma_alloc_chan_resources
- sdma_free_chan_resources
- sdma_transfer_init
- sdma_prep_memcpy
- sdma_prep_slave_sg
- sdma_prep_dma_cyclic
- sdma_config_write
- sdma_config
- sdma_tx_status
- sdma_issue_pending
- sdma_add_scripts
- sdma_load_firmware
- sdma_event_remap
- sdma_get_firmware
- sdma_init
- sdma_filter_fn
- sdma_xlate
- sdma_probe
- sdma_remove
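
For orientation, this is a dmaengine provider; peripheral drivers consume the channels through the generic dmaengine client API. A minimal, hypothetical sketch of a cyclic device-to-memory setup follows (the device pointer, the "rx" channel name, the FIFO address and the callback are illustrative assumptions, not taken from this file):

	struct dma_chan *chan;
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_chan(dev, "rx");		/* resolved via sdma_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = fifo_phys;			/* peripheral FIFO address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 6;				/* feeds the SDMA watermark level */
	dmaengine_slave_config(chan, &cfg);		/* stored by sdma_config() */

	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	desc->callback = rx_complete;			/* invoked per period from sdma_int_handler() */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);			/* ends up in sdma_issue_pending() */
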
13 #include <linux/init.h>
14 #include <linux/iopoll.h>
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/bitops.h>
18 #include <linux/mm.h>
19 #include <linux/interrupt.h>
20 #include <linux/clk.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
23 #include <linux/semaphore.h>
24 #include <linux/spinlock.h>
25 #include <linux/device.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/firmware.h>
28 #include <linux/slab.h>
29 #include <linux/platform_device.h>
30 #include <linux/dmaengine.h>
31 #include <linux/of.h>
32 #include <linux/of_address.h>
33 #include <linux/of_device.h>
34 #include <linux/of_dma.h>
35 #include <linux/workqueue.h>
36
37 #include <asm/irq.h>
38 #include <linux/platform_data/dma-imx-sdma.h>
39 #include <linux/platform_data/dma-imx.h>
40 #include <linux/regmap.h>
41 #include <linux/mfd/syscon.h>
42 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
43
44 #include "dmaengine.h"
45 #include "virt-dma.h"
46
47
48 #define SDMA_H_C0PTR 0x000
49 #define SDMA_H_INTR 0x004
50 #define SDMA_H_STATSTOP 0x008
51 #define SDMA_H_START 0x00c
52 #define SDMA_H_EVTOVR 0x010
53 #define SDMA_H_DSPOVR 0x014
54 #define SDMA_H_HOSTOVR 0x018
55 #define SDMA_H_EVTPEND 0x01c
56 #define SDMA_H_DSPENBL 0x020
57 #define SDMA_H_RESET 0x024
58 #define SDMA_H_EVTERR 0x028
59 #define SDMA_H_INTRMSK 0x02c
60 #define SDMA_H_PSW 0x030
61 #define SDMA_H_EVTERRDBG 0x034
62 #define SDMA_H_CONFIG 0x038
63 #define SDMA_ONCE_ENB 0x040
64 #define SDMA_ONCE_DATA 0x044
65 #define SDMA_ONCE_INSTR 0x048
66 #define SDMA_ONCE_STAT 0x04c
67 #define SDMA_ONCE_CMD 0x050
68 #define SDMA_EVT_MIRROR 0x054
69 #define SDMA_ILLINSTADDR 0x058
70 #define SDMA_CHN0ADDR 0x05c
71 #define SDMA_ONCE_RTB 0x060
72 #define SDMA_XTRIG_CONF1 0x070
73 #define SDMA_XTRIG_CONF2 0x074
74 #define SDMA_CHNENBL0_IMX35 0x200
75 #define SDMA_CHNENBL0_IMX31 0x080
76 #define SDMA_CHNPRI_0 0x100
77
78 /*
79  * Buffer descriptor (BD) status values.
80  */
81 #define BD_DONE 0x01
82 #define BD_WRAP 0x02
83 #define BD_CONT 0x04
84 #define BD_INTR 0x08
85 #define BD_RROR 0x10
86 #define BD_LAST 0x20
87 #define BD_EXTD 0x80
88
89 /*
90  * Data node descriptor status values.
91  */
92 #define DND_END_OF_FRAME 0x80
93 #define DND_END_OF_XFER 0x40
94 #define DND_DONE 0x20
95 #define DND_UNUSED 0x01
96
97 /*
98  * IPCv2 descriptor status values.
99  */
100 #define BD_IPCV2_END_OF_FRAME 0x40
101
102 #define IPCV2_MAX_NODES 50
103
104 /*
105  * Error bit set in the CCB status field by the SDMA in case of a transfer error.
106  */
107 #define DATA_ERROR 0x10000000
108
109 /*
110  * Buffer descriptor commands.
111  */
112 #define C0_ADDR 0x01
113 #define C0_LOAD 0x02
114 #define C0_DUMP 0x03
115 #define C0_SETCTX 0x07
116 #define C0_GETCTX 0x03
117 #define C0_SETDM 0x01
118 #define C0_SETPM 0x04
119 #define C0_GETDM 0x02
120 #define C0_GETPM 0x08
121
122
123 /* Change endianness indicator in the BD command field. */
124 #define CHANGE_ENDIANNESS 0x80
125
126 /*
127  * Peripheral-to-peripheral (p2p) watermark_level layout, as used by the
128  * defines below and by sdma_set_watermarklevel_for_p2p():
129  *	bits 0-7	LWML	lower watermark level
130  *	bit 8		PS	pad swallowing
131  *	bit 9		PA	pad adding
132  *	bit 10		SPDIF	source and destination are both on SPBA
133  *	bit 11		SP	source is on SPBA
134  *	bit 12		DP	destination is on SPBA
135  *	bits 16-23	HWML	higher watermark level
136  *	bit 28		LWE	lower watermark event is above 31 (second events register)
137  *	bit 29		HWE	higher watermark event is above 31 (second events register)
138  *	bit 31		CONT	continuous transfer until the channel is stopped
139  */
165 #define SDMA_WATERMARK_LEVEL_LWML 0xFF
166 #define SDMA_WATERMARK_LEVEL_PS BIT(8)
167 #define SDMA_WATERMARK_LEVEL_PA BIT(9)
168 #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
169 #define SDMA_WATERMARK_LEVEL_SP BIT(11)
170 #define SDMA_WATERMARK_LEVEL_DP BIT(12)
171 #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
172 #define SDMA_WATERMARK_LEVEL_LWE BIT(28)
173 #define SDMA_WATERMARK_LEVEL_HWE BIT(29)
174 #define SDMA_WATERMARK_LEVEL_CONT BIT(31)
175
176 #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
177 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
178 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
179
180 #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
181 BIT(DMA_MEM_TO_DEV) | \
182 BIT(DMA_DEV_TO_DEV))
183
184 /*
185  * Mode/Count field of data node descriptors - IPCv2.
186  */
187 struct sdma_mode_count {
188 #define SDMA_BD_MAX_CNT 0xffff
189 u32 count : 16;
190 u32 status : 8;
191 u32 command : 8;
192 };
193
194 /*
195  * Buffer descriptor.
196  */
197 struct sdma_buffer_descriptor {
198 struct sdma_mode_count mode;
199 u32 buffer_addr;
200 u32 ext_buffer_addr;
201 } __attribute__ ((packed));
202
203 /**
204  * struct sdma_channel_control - channel control block
205  *
206  * @current_bd_ptr:	current buffer descriptor being processed
207  * @base_bd_ptr:	first element of the buffer descriptor array
208  * @unused:		padding; the SDMA engine expects an array of
209  *			32 byte control blocks
210  */
211 struct sdma_channel_control {
212 u32 current_bd_ptr;
213 u32 base_bd_ptr;
214 u32 unused[2];
215 } __attribute__ ((packed));
216
217 /**
218  * struct sdma_state_registers - SDMA context for a channel
219  *
220  * @pc:		program counter
221  * @unused1:	unused
222  * @t:		test bit: status of arithmetic & test instruction
223  * @rpc:	return program counter
224  * @unused0:	unused
225  * @sf:		source fault while loading data
226  * @spc:	loop start program counter
227  * @unused2:	unused
228  * @df:		destination fault while storing data
229  * @epc:	loop end program counter
230  * @lm:		loop mode
231  */
232 struct sdma_state_registers {
233 u32 pc :14;
234 u32 unused1: 1;
235 u32 t : 1;
236 u32 rpc :14;
237 u32 unused0: 1;
238 u32 sf : 1;
239 u32 spc :14;
240 u32 unused2: 1;
241 u32 df : 1;
242 u32 epc :14;
243 u32 lm : 2;
244 } __attribute__ ((packed));
245
246 /*
247  * struct sdma_context_data - SDMA context specific to a channel; this is the
248  * image written into SDMA internal RAM for each channel via channel 0,
249  * see sdma_load_context().
250  *
251  * @channel_state:	channel state registers (program counters, fault bits)
252  * @gReg:		general registers g0..g7; the event masks, peripheral
253  *			addresses and watermark level are passed to the script here
254  * @mda/@msa/@ms/@md:	burst DMA unit address/status/data registers
255  * @pda/@psa/@ps/@pd:	peripheral DMA unit address/status/data registers
256  * @ca/@cs:		CRC unit registers
257  * @dda/@dsa/@ds/@dd:	dedicated core address/status/data registers
258  * @scratch0..@scratch7: dedicated RAM words used for context switching
259  */
274 struct sdma_context_data {
275 struct sdma_state_registers channel_state;
276 u32 gReg[8];
277 u32 mda;
278 u32 msa;
279 u32 ms;
280 u32 md;
281 u32 pda;
282 u32 psa;
283 u32 ps;
284 u32 pd;
285 u32 ca;
286 u32 cs;
287 u32 dda;
288 u32 dsa;
289 u32 ds;
290 u32 dd;
291 u32 scratch0;
292 u32 scratch1;
293 u32 scratch2;
294 u32 scratch3;
295 u32 scratch4;
296 u32 scratch5;
297 u32 scratch6;
298 u32 scratch7;
299 } __attribute__ ((packed));
300
301
302 struct sdma_engine;
303
304 /*
305  * struct sdma_desc - describes one transfer (one virt-dma descriptor)
306  * @vd:			virt-dma descriptor
307  * @num_bd:		number of buffer descriptors used by this transfer
308  * @bd_phys:		DMA address of the buffer descriptor array
309  * @buf_tail:		index of the BD currently being processed (cyclic mode)
310  * @buf_ptail:		index of the previously processed BD (cyclic mode)
311  * @period_len:		period length, used only for cyclic transfers
312  * @chn_real_count:	bytes actually transferred, taken from bd->mode.count
313  * @chn_count:		total number of bytes requested for the transfer
314  * @sdmac:		owning channel; @bd: virtual address of the BD array
315  */
316
317 struct sdma_desc {
318 struct virt_dma_desc vd;
319 unsigned int num_bd;
320 dma_addr_t bd_phys;
321 unsigned int buf_tail;
322 unsigned int buf_ptail;
323 unsigned int period_len;
324 unsigned int chn_real_count;
325 unsigned int chn_count;
326 struct sdma_channel *sdmac;
327 struct sdma_buffer_descriptor *bd;
328 };
329
330 /**
331  * struct sdma_channel - housekeeping for a SDMA channel
332  *
333  * @vc:			virt_dma base structure
334  * @desc:		currently active sdma_desc
335  * @sdma:		pointer to the SDMA engine for this channel
336  * @channel:		the channel number, matches dmaengine chan_id + 1
337  * @direction:		transfer type, needed for selecting the SDMA script
338  * @slave_config:	slave configuration
339  * @peripheral_type:	peripheral type, needed for selecting the SDMA script
340  * @event_id0:		aka dma request line
341  * @event_id1:		for channels that use two events
342  * @word_size:		peripheral access size
343  * @pc_from_device:	script address for device_2_memory
344  * @pc_to_device:	script address for memory_2_device
345  * @device_to_device:	script address for device_2_device
346  * @pc_to_pc:		script address for memory_2_memory
347  * @flags:		loop mode or not
348  * @per_address:	peripheral address (destination address in the p2p case)
349  * @per_address2:	peripheral source address in the p2p case
350  * @event_mask:		event mask used in the p2p script
351  * @watermark_level:	value for gReg[7], extended with the p2p bits if needed
352  * @shp_addr:		value for gReg[6]
353  * @per_addr:		value for gReg[2]
354  * @status:		status of the dma channel
355  * @context_loaded:	channel context has been written to SDMA RAM
356  * @data:		specific sdma interface structure
357  * @terminate_worker:	worker used to defer channel termination
358  */
359
360 struct sdma_channel {
361 struct virt_dma_chan vc;
362 struct sdma_desc *desc;
363 struct sdma_engine *sdma;
364 unsigned int channel;
365 enum dma_transfer_direction direction;
366 struct dma_slave_config slave_config;
367 enum sdma_peripheral_type peripheral_type;
368 unsigned int event_id0;
369 unsigned int event_id1;
370 enum dma_slave_buswidth word_size;
371 unsigned int pc_from_device, pc_to_device;
372 unsigned int device_to_device;
373 unsigned int pc_to_pc;
374 unsigned long flags;
375 dma_addr_t per_address, per_address2;
376 unsigned long event_mask[2];
377 unsigned long watermark_level;
378 u32 shp_addr, per_addr;
379 enum dma_status status;
380 bool context_loaded;
381 struct imx_dma_data data;
382 struct work_struct terminate_worker;
383 };
384
385 #define IMX_DMA_SG_LOOP BIT(0)
386
387 #define MAX_DMA_CHANNELS 32
388 #define MXC_SDMA_DEFAULT_PRIORITY 1
389 #define MXC_SDMA_MIN_PRIORITY 1
390 #define MXC_SDMA_MAX_PRIORITY 7
391
392 #define SDMA_FIRMWARE_MAGIC 0x414d4453
393
394 /*
395  * struct sdma_firmware_header - layout of the firmware image
396  *
397  * @magic:		"SDMA"
398  * @version_major:	increased whenever the layout of
399  *			struct sdma_script_start_addrs changes
400  * @version_minor:	firmware minor version
401  * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
402  * @num_script_addrs:	number of script addresses in this image
403  * @ram_code_start:	offset of the SDMA RAM image in this firmware image
404  * @ram_code_size:	size of the SDMA RAM image
405  */
406
407
408 struct sdma_firmware_header {
409 u32 magic;
410 u32 version_major;
411 u32 version_minor;
412 u32 script_addrs_start;
413 u32 num_script_addrs;
414 u32 ram_code_start;
415 u32 ram_code_size;
416 };
417
418 struct sdma_driver_data {
419 int chnenbl0;
420 int num_events;
421 struct sdma_script_start_addrs *script_addrs;
422 bool check_ratio;
423 };
424
425 struct sdma_engine {
426 struct device *dev;
427 struct device_dma_parameters dma_parms;
428 struct sdma_channel channel[MAX_DMA_CHANNELS];
429 struct sdma_channel_control *channel_control;
430 void __iomem *regs;
431 struct sdma_context_data *context;
432 dma_addr_t context_phys;
433 struct dma_device dma_device;
434 struct clk *clk_ipg;
435 struct clk *clk_ahb;
436 spinlock_t channel_0_lock;
437 u32 script_number;
438 struct sdma_script_start_addrs *script_addrs;
439 const struct sdma_driver_data *drvdata;
440 u32 spba_start_addr;
441 u32 spba_end_addr;
442 unsigned int irq;
443 dma_addr_t bd0_phys;
444 struct sdma_buffer_descriptor *bd0;
445
446 bool clk_ratio;
447 };
448
449 static int sdma_config_write(struct dma_chan *chan,
450 struct dma_slave_config *dmaengine_cfg,
451 enum dma_transfer_direction direction);
452
453 static struct sdma_driver_data sdma_imx31 = {
454 .chnenbl0 = SDMA_CHNENBL0_IMX31,
455 .num_events = 32,
456 };
457
458 static struct sdma_script_start_addrs sdma_script_imx25 = {
459 .ap_2_ap_addr = 729,
460 .uart_2_mcu_addr = 904,
461 .per_2_app_addr = 1255,
462 .mcu_2_app_addr = 834,
463 .uartsh_2_mcu_addr = 1120,
464 .per_2_shp_addr = 1329,
465 .mcu_2_shp_addr = 1048,
466 .ata_2_mcu_addr = 1560,
467 .mcu_2_ata_addr = 1479,
468 .app_2_per_addr = 1189,
469 .app_2_mcu_addr = 770,
470 .shp_2_per_addr = 1407,
471 .shp_2_mcu_addr = 979,
472 };
473
474 static struct sdma_driver_data sdma_imx25 = {
475 .chnenbl0 = SDMA_CHNENBL0_IMX35,
476 .num_events = 48,
477 .script_addrs = &sdma_script_imx25,
478 };
479
480 static struct sdma_driver_data sdma_imx35 = {
481 .chnenbl0 = SDMA_CHNENBL0_IMX35,
482 .num_events = 48,
483 };
484
485 static struct sdma_script_start_addrs sdma_script_imx51 = {
486 .ap_2_ap_addr = 642,
487 .uart_2_mcu_addr = 817,
488 .mcu_2_app_addr = 747,
489 .mcu_2_shp_addr = 961,
490 .ata_2_mcu_addr = 1473,
491 .mcu_2_ata_addr = 1392,
492 .app_2_per_addr = 1033,
493 .app_2_mcu_addr = 683,
494 .shp_2_per_addr = 1251,
495 .shp_2_mcu_addr = 892,
496 };
497
498 static struct sdma_driver_data sdma_imx51 = {
499 .chnenbl0 = SDMA_CHNENBL0_IMX35,
500 .num_events = 48,
501 .script_addrs = &sdma_script_imx51,
502 };
503
504 static struct sdma_script_start_addrs sdma_script_imx53 = {
505 .ap_2_ap_addr = 642,
506 .app_2_mcu_addr = 683,
507 .mcu_2_app_addr = 747,
508 .uart_2_mcu_addr = 817,
509 .shp_2_mcu_addr = 891,
510 .mcu_2_shp_addr = 960,
511 .uartsh_2_mcu_addr = 1032,
512 .spdif_2_mcu_addr = 1100,
513 .mcu_2_spdif_addr = 1134,
514 .firi_2_mcu_addr = 1193,
515 .mcu_2_firi_addr = 1290,
516 };
517
518 static struct sdma_driver_data sdma_imx53 = {
519 .chnenbl0 = SDMA_CHNENBL0_IMX35,
520 .num_events = 48,
521 .script_addrs = &sdma_script_imx53,
522 };
523
524 static struct sdma_script_start_addrs sdma_script_imx6q = {
525 .ap_2_ap_addr = 642,
526 .uart_2_mcu_addr = 817,
527 .mcu_2_app_addr = 747,
528 .per_2_per_addr = 6331,
529 .uartsh_2_mcu_addr = 1032,
530 .mcu_2_shp_addr = 960,
531 .app_2_mcu_addr = 683,
532 .shp_2_mcu_addr = 891,
533 .spdif_2_mcu_addr = 1100,
534 .mcu_2_spdif_addr = 1134,
535 };
536
537 static struct sdma_driver_data sdma_imx6q = {
538 .chnenbl0 = SDMA_CHNENBL0_IMX35,
539 .num_events = 48,
540 .script_addrs = &sdma_script_imx6q,
541 };
542
543 static struct sdma_script_start_addrs sdma_script_imx7d = {
544 .ap_2_ap_addr = 644,
545 .uart_2_mcu_addr = 819,
546 .mcu_2_app_addr = 749,
547 .uartsh_2_mcu_addr = 1034,
548 .mcu_2_shp_addr = 962,
549 .app_2_mcu_addr = 685,
550 .shp_2_mcu_addr = 893,
551 .spdif_2_mcu_addr = 1102,
552 .mcu_2_spdif_addr = 1136,
553 };
554
555 static struct sdma_driver_data sdma_imx7d = {
556 .chnenbl0 = SDMA_CHNENBL0_IMX35,
557 .num_events = 48,
558 .script_addrs = &sdma_script_imx7d,
559 };
560
561 static struct sdma_driver_data sdma_imx8mq = {
562 .chnenbl0 = SDMA_CHNENBL0_IMX35,
563 .num_events = 48,
564 .script_addrs = &sdma_script_imx7d,
565 .check_ratio = 1,
566 };
567
568 static const struct platform_device_id sdma_devtypes[] = {
569 {
570 .name = "imx25-sdma",
571 .driver_data = (unsigned long)&sdma_imx25,
572 }, {
573 .name = "imx31-sdma",
574 .driver_data = (unsigned long)&sdma_imx31,
575 }, {
576 .name = "imx35-sdma",
577 .driver_data = (unsigned long)&sdma_imx35,
578 }, {
579 .name = "imx51-sdma",
580 .driver_data = (unsigned long)&sdma_imx51,
581 }, {
582 .name = "imx53-sdma",
583 .driver_data = (unsigned long)&sdma_imx53,
584 }, {
585 .name = "imx6q-sdma",
586 .driver_data = (unsigned long)&sdma_imx6q,
587 }, {
588 .name = "imx7d-sdma",
589 .driver_data = (unsigned long)&sdma_imx7d,
590 }, {
591 .name = "imx8mq-sdma",
592 .driver_data = (unsigned long)&sdma_imx8mq,
593 }, {
594
595 }
596 };
597 MODULE_DEVICE_TABLE(platform, sdma_devtypes);
598
599 static const struct of_device_id sdma_dt_ids[] = {
600 { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
601 { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
602 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
603 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
604 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
605 { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
606 { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
607 { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
608 { }
609 };
610 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
611
612 #define SDMA_H_CONFIG_DSPDMA BIT(12)
613 #define SDMA_H_CONFIG_RTD_PINS BIT(11)
614 #define SDMA_H_CONFIG_ACR BIT(4)
615 #define SDMA_H_CONFIG_CSM (3)
616
617 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
618 {
619 u32 chnenbl0 = sdma->drvdata->chnenbl0;
620 return chnenbl0 + event * 4;
621 }
622
623 static int sdma_config_ownership(struct sdma_channel *sdmac,
624 bool event_override, bool mcu_override, bool dsp_override)
625 {
626 struct sdma_engine *sdma = sdmac->sdma;
627 int channel = sdmac->channel;
628 unsigned long evt, mcu, dsp;
629
630 if (event_override && mcu_override && dsp_override)
631 return -EINVAL;
632
633 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
634 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
635 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
636
637 if (dsp_override)
638 __clear_bit(channel, &dsp);
639 else
640 __set_bit(channel, &dsp);
641
642 if (event_override)
643 __clear_bit(channel, &evt);
644 else
645 __set_bit(channel, &evt);
646
647 if (mcu_override)
648 __clear_bit(channel, &mcu);
649 else
650 __set_bit(channel, &mcu);
651
652 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
653 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
654 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
655
656 return 0;
657 }
658
659 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
660 {
661 writel(BIT(channel), sdma->regs + SDMA_H_START);
662 }
663
664 /*
665  * sdma_run_channel0 - run channel 0 and wait until it has finished
666  */
667 static int sdma_run_channel0(struct sdma_engine *sdma)
668 {
669 int ret;
670 u32 reg;
671
672 sdma_enable_channel(sdma, 0);
673
674 ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
675 reg, !(reg & 1), 1, 500);
676 if (ret)
677 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
678
679 /* switch the SDMA to dynamic context switching if it is still static */
680 reg = readl(sdma->regs + SDMA_H_CONFIG);
681 if ((reg & SDMA_H_CONFIG_CSM) == 0) {
682 reg |= SDMA_H_CONFIG_CSM;
683 writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
684 }
685
686 return ret;
687 }
688
689 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
690 u32 address)
691 {
692 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
693 void *buf_virt;
694 dma_addr_t buf_phys;
695 int ret;
696 unsigned long flags;
697
698 buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
699 if (!buf_virt) {
700 return -ENOMEM;
701 }
702
703 spin_lock_irqsave(&sdma->channel_0_lock, flags);
704
705 bd0->mode.command = C0_SETPM;
706 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
707 bd0->mode.count = size / 2;
708 bd0->buffer_addr = buf_phys;
709 bd0->ext_buffer_addr = address;
710
711 memcpy(buf_virt, buf, size);
712
713 ret = sdma_run_channel0(sdma);
714
715 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
716
717 dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
718
719 return ret;
720 }
721
722 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
723 {
724 struct sdma_engine *sdma = sdmac->sdma;
725 int channel = sdmac->channel;
726 unsigned long val;
727 u32 chnenbl = chnenbl_ofs(sdma, event);
728
729 val = readl_relaxed(sdma->regs + chnenbl);
730 __set_bit(channel, &val);
731 writel_relaxed(val, sdma->regs + chnenbl);
732 }
733
734 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
735 {
736 struct sdma_engine *sdma = sdmac->sdma;
737 int channel = sdmac->channel;
738 u32 chnenbl = chnenbl_ofs(sdma, event);
739 unsigned long val;
740
741 val = readl_relaxed(sdma->regs + chnenbl);
742 __clear_bit(channel, &val);
743 writel_relaxed(val, sdma->regs + chnenbl);
744 }
745
746 static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
747 {
748 return container_of(t, struct sdma_desc, vd.tx);
749 }
750
751 static void sdma_start_desc(struct sdma_channel *sdmac)
752 {
753 struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
754 struct sdma_desc *desc;
755 struct sdma_engine *sdma = sdmac->sdma;
756 int channel = sdmac->channel;
757
758 if (!vd) {
759 sdmac->desc = NULL;
760 return;
761 }
762 sdmac->desc = desc = to_sdma_desc(&vd->tx);
763
764 /* In cyclic mode the descriptor stays on the issued list and is reused
765  * for every period; only non-cyclic descriptors are removed here and
766  * completed from the interrupt handler. */
767 if (!(sdmac->flags & IMX_DMA_SG_LOOP))
768 list_del(&vd->node);
769
770 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
771 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
772 sdma_enable_channel(sdma, sdmac->channel);
773 }
774
775 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
776 {
777 struct sdma_buffer_descriptor *bd;
778 int error = 0;
779 enum dma_status old_status = sdmac->status;
780
781 /*
782  * loop mode: iterate over the completed buffer descriptors, invoke the
783  * client callback for each period and re-arm the descriptor.
784  */
785 while (sdmac->desc) {
786 struct sdma_desc *desc = sdmac->desc;
787
788 bd = &desc->bd[desc->buf_tail];
789
790 if (bd->mode.status & BD_DONE)
791 break;
792
793 if (bd->mode.status & BD_RROR) {
794 bd->mode.status &= ~BD_RROR;
795 sdmac->status = DMA_ERROR;
796 error = -EIO;
797 }
798
799 /*
800  * We use bd->mode.count to calculate the residue, since it contains
801  * the number of bytes present in the current buffer descriptor.
802  */
803
804 desc->chn_real_count = bd->mode.count;
805 bd->mode.status |= BD_DONE;
806 bd->mode.count = desc->period_len;
807 desc->buf_ptail = desc->buf_tail;
808 desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
809
810 /*
811  * The callback is called from the interrupt context in order
812  * to reduce latency and to avoid the risk of altering the
813  * SDMA transaction status by the time the client tasklet is
814  * executed.
815  */
816 spin_unlock(&sdmac->vc.lock);
817 dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
818 spin_lock(&sdmac->vc.lock);
819
820 if (error)
821 sdmac->status = old_status;
822 }
823 }
824
825 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
826 {
827 struct sdma_channel *sdmac = (struct sdma_channel *) data;
828 struct sdma_buffer_descriptor *bd;
829 int i, error = 0;
830
831 sdmac->desc->chn_real_count = 0;
832 /*
833  * non loop mode: iterate over all buffer descriptors, collect
834  * errors and sum up the bytes actually transferred.
835  */
836 for (i = 0; i < sdmac->desc->num_bd; i++) {
837 bd = &sdmac->desc->bd[i];
838
839 if (bd->mode.status & (BD_DONE | BD_RROR))
840 error = -EIO;
841 sdmac->desc->chn_real_count += bd->mode.count;
842 }
843
844 if (error)
845 sdmac->status = DMA_ERROR;
846 else
847 sdmac->status = DMA_COMPLETE;
848 }
849
850 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
851 {
852 struct sdma_engine *sdma = dev_id;
853 unsigned long stat;
854
855 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
856 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
857 /* channel 0 is special and not handled here, see sdma_run_channel0() */
858 stat &= ~1;
859
860 while (stat) {
861 int channel = fls(stat) - 1;
862 struct sdma_channel *sdmac = &sdma->channel[channel];
863 struct sdma_desc *desc;
864
865 spin_lock(&sdmac->vc.lock);
866 desc = sdmac->desc;
867 if (desc) {
868 if (sdmac->flags & IMX_DMA_SG_LOOP) {
869 sdma_update_channel_loop(sdmac);
870 } else {
871 mxc_sdma_handle_channel_normal(sdmac);
872 vchan_cookie_complete(&desc->vd);
873 sdma_start_desc(sdmac);
874 }
875 }
876
877 spin_unlock(&sdmac->vc.lock);
878 __clear_bit(channel, &stat);
879 }
880
881 return IRQ_HANDLED;
882 }
883
884 /*
885  * sets the pc of the SDMA script according to the peripheral type
886  */
887 static void sdma_get_pc(struct sdma_channel *sdmac,
888 enum sdma_peripheral_type peripheral_type)
889 {
890 struct sdma_engine *sdma = sdmac->sdma;
891 int per_2_emi = 0, emi_2_per = 0;
892
893 /* per_2_per and emi_2_emi are used for peripheral-to-peripheral
894  * and memory-to-memory transfers, respectively.
895  */
896 int per_2_per = 0, emi_2_emi = 0;
897
898 sdmac->pc_from_device = 0;
899 sdmac->pc_to_device = 0;
900 sdmac->device_to_device = 0;
901 sdmac->pc_to_pc = 0;
902
903 switch (peripheral_type) {
904 case IMX_DMATYPE_MEMORY:
905 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
906 break;
907 case IMX_DMATYPE_DSP:
908 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
909 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
910 break;
911 case IMX_DMATYPE_FIRI:
912 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
913 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
914 break;
915 case IMX_DMATYPE_UART:
916 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
917 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
918 break;
919 case IMX_DMATYPE_UART_SP:
920 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
921 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
922 break;
923 case IMX_DMATYPE_ATA:
924 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
925 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
926 break;
927 case IMX_DMATYPE_CSPI:
928 case IMX_DMATYPE_EXT:
929 case IMX_DMATYPE_SSI:
930 case IMX_DMATYPE_SAI:
931 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
932 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
933 break;
934 case IMX_DMATYPE_SSI_DUAL:
935 per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
936 emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
937 break;
938 case IMX_DMATYPE_SSI_SP:
939 case IMX_DMATYPE_MMC:
940 case IMX_DMATYPE_SDHC:
941 case IMX_DMATYPE_CSPI_SP:
942 case IMX_DMATYPE_ESAI:
943 case IMX_DMATYPE_MSHC_SP:
944 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
945 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
946 break;
947 case IMX_DMATYPE_ASRC:
948 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
949 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
950 per_2_per = sdma->script_addrs->per_2_per_addr;
951 break;
952 case IMX_DMATYPE_ASRC_SP:
953 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
954 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
955 per_2_per = sdma->script_addrs->per_2_per_addr;
956 break;
957 case IMX_DMATYPE_MSHC:
958 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
959 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
960 break;
961 case IMX_DMATYPE_CCM:
962 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
963 break;
964 case IMX_DMATYPE_SPDIF:
965 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
966 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
967 break;
968 case IMX_DMATYPE_IPU_MEMORY:
969 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
970 break;
971 default:
972 break;
973 }
974
975 sdmac->pc_from_device = per_2_emi;
976 sdmac->pc_to_device = emi_2_per;
977 sdmac->device_to_device = per_2_per;
978 sdmac->pc_to_pc = emi_2_emi;
979 }
980
981 static int sdma_load_context(struct sdma_channel *sdmac)
982 {
983 struct sdma_engine *sdma = sdmac->sdma;
984 int channel = sdmac->channel;
985 int load_address;
986 struct sdma_context_data *context = sdma->context;
987 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
988 int ret;
989 unsigned long flags;
990
991 if (sdmac->context_loaded)
992 return 0;
993
994 if (sdmac->direction == DMA_DEV_TO_MEM)
995 load_address = sdmac->pc_from_device;
996 else if (sdmac->direction == DMA_DEV_TO_DEV)
997 load_address = sdmac->device_to_device;
998 else if (sdmac->direction == DMA_MEM_TO_MEM)
999 load_address = sdmac->pc_to_pc;
1000 else
1001 load_address = sdmac->pc_to_device;
1002
1003 if (load_address < 0)
1004 return load_address;
1005
1006 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1007 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1008 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1009 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1010 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1011 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1012
1013 spin_lock_irqsave(&sdma->channel_0_lock, flags);
1014
1015 memset(context, 0, sizeof(*context));
1016 context->channel_state.pc = load_address;
1017 /*
1018  * Pass the event masks, the peripheral base addresses and the
1019  * watermark level to the script through the channel context.
1020  */
1021 context->gReg[0] = sdmac->event_mask[1];
1022 context->gReg[1] = sdmac->event_mask[0];
1023 context->gReg[2] = sdmac->per_addr;
1024 context->gReg[6] = sdmac->shp_addr;
1025 context->gReg[7] = sdmac->watermark_level;
1026
1027 bd0->mode.command = C0_SETDM;
1028 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1029 bd0->mode.count = sizeof(*context) / 4;
1030 bd0->buffer_addr = sdma->context_phys;
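/*
 * Channel contexts live in SDMA data RAM starting at word address 2048,
 * one context (sizeof(*context)/4 words) per channel.
 */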
1031 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1032 ret = sdma_run_channel0(sdma);
1033
1034 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1035
1036 sdmac->context_loaded = true;
1037
1038 return ret;
1039 }
1040
1041 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1042 {
1043 return container_of(chan, struct sdma_channel, vc.chan);
1044 }
1045
1046 static int sdma_disable_channel(struct dma_chan *chan)
1047 {
1048 struct sdma_channel *sdmac = to_sdma_chan(chan);
1049 struct sdma_engine *sdma = sdmac->sdma;
1050 int channel = sdmac->channel;
1051
1052 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1053 sdmac->status = DMA_ERROR;
1054
1055 return 0;
1056 }
1057 static void sdma_channel_terminate_work(struct work_struct *work)
1058 {
1059 struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1060 terminate_worker);
1061 unsigned long flags;
1062 LIST_HEAD(head);
1063
1064 /*
1065  * Wait at least one BD transfer time (worst case about 1 ms) after
1066  * clearing the channel enable bit, so the SDMA core has really stopped
1067  * before the descriptors below are torn down; without the delay a late
1068  * SDMA interrupt could still reference them.
1069  */
1070 usleep_range(1000, 2000);
1071
1072 spin_lock_irqsave(&sdmac->vc.lock, flags);
1073 vchan_get_all_descriptors(&sdmac->vc, &head);
1074 sdmac->desc = NULL;
1075 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1076 vchan_dma_desc_free_list(&sdmac->vc, &head);
1077 sdmac->context_loaded = false;
1078 }
1079
1080 static int sdma_disable_channel_async(struct dma_chan *chan)
1081 {
1082 struct sdma_channel *sdmac = to_sdma_chan(chan);
1083
1084 sdma_disable_channel(chan);
1085
1086 if (sdmac->desc)
1087 schedule_work(&sdmac->terminate_worker);
1088
1089 return 0;
1090 }
1091
1092 static void sdma_channel_synchronize(struct dma_chan *chan)
1093 {
1094 struct sdma_channel *sdmac = to_sdma_chan(chan);
1095
1096 vchan_synchronize(&sdmac->vc);
1097
1098 flush_work(&sdmac->terminate_worker);
1099 }
1100
1101 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1102 {
1103 struct sdma_engine *sdma = sdmac->sdma;
1104
1105 int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1106 int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1107
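/*
 * Route event_id0/event_id1 into the two event masks used by the p2p
 * script; events above 31 live in the second SDMA events register and
 * are flagged via the LWE/HWE bits below.
 */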
1108 set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1109 set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1110
1111 if (sdmac->event_id0 > 31)
1112 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1113
1114 if (sdmac->event_id1 > 31)
1115 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1116
1117 /*
1118  * If LWML (derived from src_maxburst) is larger than HWML (derived
1119  * from dst_maxburst), swap the two watermark fields and the
1120  * corresponding event masks.
1121  */
1122 if (lwml > hwml) {
1123 sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1124 SDMA_WATERMARK_LEVEL_HWML);
1125 sdmac->watermark_level |= hwml;
1126 sdmac->watermark_level |= lwml << 16;
1127 swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1128 }
1129
1130 if (sdmac->per_address2 >= sdma->spba_start_addr &&
1131 sdmac->per_address2 <= sdma->spba_end_addr)
1132 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1133
1134 if (sdmac->per_address >= sdma->spba_start_addr &&
1135 sdmac->per_address <= sdma->spba_end_addr)
1136 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1137
1138 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1139 }
1140
1141 static int sdma_config_channel(struct dma_chan *chan)
1142 {
1143 struct sdma_channel *sdmac = to_sdma_chan(chan);
1144 int ret;
1145
1146 sdma_disable_channel(chan);
1147
1148 sdmac->event_mask[0] = 0;
1149 sdmac->event_mask[1] = 0;
1150 sdmac->shp_addr = 0;
1151 sdmac->per_addr = 0;
1152
1153 switch (sdmac->peripheral_type) {
1154 case IMX_DMATYPE_DSP:
1155 sdma_config_ownership(sdmac, false, true, true);
1156 break;
1157 case IMX_DMATYPE_MEMORY:
1158 sdma_config_ownership(sdmac, false, true, false);
1159 break;
1160 default:
1161 sdma_config_ownership(sdmac, true, true, false);
1162 break;
1163 }
1164
1165 sdma_get_pc(sdmac, sdmac->peripheral_type);
1166
1167 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1168 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1169
1170 if (sdmac->event_id1) {
1171 if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1172 sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1173 sdma_set_watermarklevel_for_p2p(sdmac);
1174 } else
1175 __set_bit(sdmac->event_id0, sdmac->event_mask);
1176
1177
1178 sdmac->shp_addr = sdmac->per_address;
1179 sdmac->per_addr = sdmac->per_address2;
1180 } else {
1181 sdmac->watermark_level = 0;
1182 }
1183
1184 ret = sdma_load_context(sdmac);
1185
1186 return ret;
1187 }
1188
1189 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1190 unsigned int priority)
1191 {
1192 struct sdma_engine *sdma = sdmac->sdma;
1193 int channel = sdmac->channel;
1194
1195 if (priority < MXC_SDMA_MIN_PRIORITY
1196 || priority > MXC_SDMA_MAX_PRIORITY) {
1197 return -EINVAL;
1198 }
1199
1200 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1201
1202 return 0;
1203 }
1204
1205 static int sdma_request_channel0(struct sdma_engine *sdma)
1206 {
1207 int ret = -EBUSY;
1208
1209 sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1210 GFP_NOWAIT);
1211 if (!sdma->bd0) {
1212 ret = -ENOMEM;
1213 goto out;
1214 }
1215
1216 sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1217 sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1218
1219 sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1220 return 0;
1221 out:
1222
1223 return ret;
1224 }
1225
1226
1227 static int sdma_alloc_bd(struct sdma_desc *desc)
1228 {
1229 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1230 int ret = 0;
1231
1232 desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1233 &desc->bd_phys, GFP_NOWAIT);
1234 if (!desc->bd) {
1235 ret = -ENOMEM;
1236 goto out;
1237 }
1238 out:
1239 return ret;
1240 }
1241
1242 static void sdma_free_bd(struct sdma_desc *desc)
1243 {
1244 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1245
1246 dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1247 desc->bd_phys);
1248 }
1249
1250 static void sdma_desc_free(struct virt_dma_desc *vd)
1251 {
1252 struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1253
1254 sdma_free_bd(desc);
1255 kfree(desc);
1256 }
1257
1258 static int sdma_alloc_chan_resources(struct dma_chan *chan)
1259 {
1260 struct sdma_channel *sdmac = to_sdma_chan(chan);
1261 struct imx_dma_data *data = chan->private;
1262 struct imx_dma_data mem_data;
1263 int prio, ret;
1264
1265 /*
1266  * MEMCPY channels may never set up chan->private through a filter
1267  * function (dmatest is one such user), so fall back to a local
1268  * imx_dma_data describing a memory-to-memory channel in that case.
1269  */
1270
1274 if (!data) {
1275 dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1276 mem_data.priority = 2;
1277 mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1278 mem_data.dma_request = 0;
1279 mem_data.dma_request2 = 0;
1280 data = &mem_data;
1281
1282 sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1283 }
1284
1285 switch (data->priority) {
1286 case DMA_PRIO_HIGH:
1287 prio = 3;
1288 break;
1289 case DMA_PRIO_MEDIUM:
1290 prio = 2;
1291 break;
1292 case DMA_PRIO_LOW:
1293 default:
1294 prio = 1;
1295 break;
1296 }
1297
1298 sdmac->peripheral_type = data->peripheral_type;
1299 sdmac->event_id0 = data->dma_request;
1300 sdmac->event_id1 = data->dma_request2;
1301
1302 ret = clk_enable(sdmac->sdma->clk_ipg);
1303 if (ret)
1304 return ret;
1305 ret = clk_enable(sdmac->sdma->clk_ahb);
1306 if (ret)
1307 goto disable_clk_ipg;
1308
1309 ret = sdma_set_channel_priority(sdmac, prio);
1310 if (ret)
1311 goto disable_clk_ahb;
1312
1313 return 0;
1314
1315 disable_clk_ahb:
1316 clk_disable(sdmac->sdma->clk_ahb);
1317 disable_clk_ipg:
1318 clk_disable(sdmac->sdma->clk_ipg);
1319 return ret;
1320 }
1321
1322 static void sdma_free_chan_resources(struct dma_chan *chan)
1323 {
1324 struct sdma_channel *sdmac = to_sdma_chan(chan);
1325 struct sdma_engine *sdma = sdmac->sdma;
1326
1327 sdma_disable_channel_async(chan);
1328
1329 sdma_channel_synchronize(chan);
1330
1331 if (sdmac->event_id0 >= 0)
1332 sdma_event_disable(sdmac, sdmac->event_id0);
1333 if (sdmac->event_id1)
1334 sdma_event_disable(sdmac, sdmac->event_id1);
1335
1336 sdmac->event_id0 = 0;
1337 sdmac->event_id1 = 0;
1338 sdmac->context_loaded = false;
1339
1340 sdma_set_channel_priority(sdmac, 0);
1341
1342 clk_disable(sdma->clk_ipg);
1343 clk_disable(sdma->clk_ahb);
1344 }
1345
1346 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1347 enum dma_transfer_direction direction, u32 bds)
1348 {
1349 struct sdma_desc *desc;
1350
1351 desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1352 if (!desc)
1353 goto err_out;
1354
1355 sdmac->status = DMA_IN_PROGRESS;
1356 sdmac->direction = direction;
1357 sdmac->flags = 0;
1358
1359 desc->chn_count = 0;
1360 desc->chn_real_count = 0;
1361 desc->buf_tail = 0;
1362 desc->buf_ptail = 0;
1363 desc->sdmac = sdmac;
1364 desc->num_bd = bds;
1365
1366 if (sdma_alloc_bd(desc))
1367 goto err_desc_out;
1368
1369 /* no dmaengine_slave_config() is issued for MEMCPY, so set ownership here */
1370 if (direction == DMA_MEM_TO_MEM)
1371 sdma_config_ownership(sdmac, false, true, false);
1372
1373 if (sdma_load_context(sdmac))
1374 goto err_desc_out;
1375
1376 return desc;
1377
1378 err_desc_out:
1379 kfree(desc);
1380 err_out:
1381 return NULL;
1382 }
1383
1384 static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1385 struct dma_chan *chan, dma_addr_t dma_dst,
1386 dma_addr_t dma_src, size_t len, unsigned long flags)
1387 {
1388 struct sdma_channel *sdmac = to_sdma_chan(chan);
1389 struct sdma_engine *sdma = sdmac->sdma;
1390 int channel = sdmac->channel;
1391 size_t count;
1392 int i = 0, param;
1393 struct sdma_buffer_descriptor *bd;
1394 struct sdma_desc *desc;
1395
1396 if (!chan || !len)
1397 return NULL;
1398
1399 dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1400 &dma_src, &dma_dst, len, channel);
1401
1402 desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1403 len / SDMA_BD_MAX_CNT + 1);
1404 if (!desc)
1405 return NULL;
1406
1407 do {
1408 count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1409 bd = &desc->bd[i];
1410 bd->buffer_addr = dma_src;
1411 bd->ext_buffer_addr = dma_dst;
1412 bd->mode.count = count;
1413 desc->chn_count += count;
1414 bd->mode.command = 0;
1415
1416 dma_src += count;
1417 dma_dst += count;
1418 len -= count;
1419 i++;
1420
1421 param = BD_DONE | BD_EXTD | BD_CONT;
1422
1423 if (!len) {
1424 param |= BD_INTR;
1425 param |= BD_LAST;
1426 param &= ~BD_CONT;
1427 }
1428
1429 dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1430 i, count, bd->buffer_addr,
1431 param & BD_WRAP ? "wrap" : "",
1432 param & BD_INTR ? " intr" : "");
1433
1434 bd->mode.status = param;
1435 } while (len);
1436
1437 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1438 }
1439
1440 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1441 struct dma_chan *chan, struct scatterlist *sgl,
1442 unsigned int sg_len, enum dma_transfer_direction direction,
1443 unsigned long flags, void *context)
1444 {
1445 struct sdma_channel *sdmac = to_sdma_chan(chan);
1446 struct sdma_engine *sdma = sdmac->sdma;
1447 int i, count;
1448 int channel = sdmac->channel;
1449 struct scatterlist *sg;
1450 struct sdma_desc *desc;
1451
1452 sdma_config_write(chan, &sdmac->slave_config, direction);
1453
1454 desc = sdma_transfer_init(sdmac, direction, sg_len);
1455 if (!desc)
1456 goto err_out;
1457
1458 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1459 sg_len, channel);
1460
1461 for_each_sg(sgl, sg, sg_len, i) {
1462 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1463 int param;
1464
1465 bd->buffer_addr = sg->dma_address;
1466
1467 count = sg_dma_len(sg);
1468
1469 if (count > SDMA_BD_MAX_CNT) {
1470 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1471 channel, count, SDMA_BD_MAX_CNT);
1472 goto err_bd_out;
1473 }
1474
1475 bd->mode.count = count;
1476 desc->chn_count += count;
1477
1478 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1479 goto err_bd_out;
1480
1481 switch (sdmac->word_size) {
1482 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1483 bd->mode.command = 0;
1484 if (count & 3 || sg->dma_address & 3)
1485 goto err_bd_out;
1486 break;
1487 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1488 bd->mode.command = 2;
1489 if (count & 1 || sg->dma_address & 1)
1490 goto err_bd_out;
1491 break;
1492 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1493 bd->mode.command = 1;
1494 break;
1495 default:
1496 goto err_bd_out;
1497 }
1498
1499 param = BD_DONE | BD_EXTD | BD_CONT;
1500
1501 if (i + 1 == sg_len) {
1502 param |= BD_INTR;
1503 param |= BD_LAST;
1504 param &= ~BD_CONT;
1505 }
1506
1507 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1508 i, count, (u64)sg->dma_address,
1509 param & BD_WRAP ? "wrap" : "",
1510 param & BD_INTR ? " intr" : "");
1511
1512 bd->mode.status = param;
1513 }
1514
1515 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1516 err_bd_out:
1517 sdma_free_bd(desc);
1518 kfree(desc);
1519 err_out:
1520 sdmac->status = DMA_ERROR;
1521 return NULL;
1522 }
1523
1524 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1525 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1526 size_t period_len, enum dma_transfer_direction direction,
1527 unsigned long flags)
1528 {
1529 struct sdma_channel *sdmac = to_sdma_chan(chan);
1530 struct sdma_engine *sdma = sdmac->sdma;
1531 int num_periods = buf_len / period_len;
1532 int channel = sdmac->channel;
1533 int i = 0, buf = 0;
1534 struct sdma_desc *desc;
1535
1536 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1537
1538 sdma_config_write(chan, &sdmac->slave_config, direction);
1539
1540 desc = sdma_transfer_init(sdmac, direction, num_periods);
1541 if (!desc)
1542 goto err_out;
1543
1544 desc->period_len = period_len;
1545
1546 sdmac->flags |= IMX_DMA_SG_LOOP;
1547
1548 if (period_len > SDMA_BD_MAX_CNT) {
1549 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1550 channel, period_len, SDMA_BD_MAX_CNT);
1551 goto err_bd_out;
1552 }
1553
1554 while (buf < buf_len) {
1555 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1556 int param;
1557
1558 bd->buffer_addr = dma_addr;
1559
1560 bd->mode.count = period_len;
1561
1562 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1563 goto err_bd_out;
1564 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1565 bd->mode.command = 0;
1566 else
1567 bd->mode.command = sdmac->word_size;
1568
1569 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1570 if (i + 1 == num_periods)
1571 param |= BD_WRAP;
1572
1573 dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1574 i, period_len, (u64)dma_addr,
1575 param & BD_WRAP ? "wrap" : "",
1576 param & BD_INTR ? " intr" : "");
1577
1578 bd->mode.status = param;
1579
1580 dma_addr += period_len;
1581 buf += period_len;
1582
1583 i++;
1584 }
1585
1586 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1587 err_bd_out:
1588 sdma_free_bd(desc);
1589 kfree(desc);
1590 err_out:
1591 sdmac->status = DMA_ERROR;
1592 return NULL;
1593 }
1594
1595 static int sdma_config_write(struct dma_chan *chan,
1596 struct dma_slave_config *dmaengine_cfg,
1597 enum dma_transfer_direction direction)
1598 {
1599 struct sdma_channel *sdmac = to_sdma_chan(chan);
1600
1601 if (direction == DMA_DEV_TO_MEM) {
1602 sdmac->per_address = dmaengine_cfg->src_addr;
1603 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1604 dmaengine_cfg->src_addr_width;
1605 sdmac->word_size = dmaengine_cfg->src_addr_width;
1606 } else if (direction == DMA_DEV_TO_DEV) {
1607 sdmac->per_address2 = dmaengine_cfg->src_addr;
1608 sdmac->per_address = dmaengine_cfg->dst_addr;
1609 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1610 SDMA_WATERMARK_LEVEL_LWML;
1611 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1612 SDMA_WATERMARK_LEVEL_HWML;
1613 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1614 } else {
1615 sdmac->per_address = dmaengine_cfg->dst_addr;
1616 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1617 dmaengine_cfg->dst_addr_width;
1618 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1619 }
1620 sdmac->direction = direction;
1621 return sdma_config_channel(chan);
1622 }
1623
1624 static int sdma_config(struct dma_chan *chan,
1625 struct dma_slave_config *dmaengine_cfg)
1626 {
1627 struct sdma_channel *sdmac = to_sdma_chan(chan);
1628
1629 memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1630
1631 /* route the channel's DMA request event(s) to this channel */
1632 if (sdmac->event_id0 >= 0) {
1633 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1634 return -EINVAL;
1635 sdma_event_enable(sdmac, sdmac->event_id0);
1636 }
1637
1638 if (sdmac->event_id1) {
1639 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1640 return -EINVAL;
1641 sdma_event_enable(sdmac, sdmac->event_id1);
1642 }
1643
1644 return 0;
1645 }
1646
1647 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1648 dma_cookie_t cookie,
1649 struct dma_tx_state *txstate)
1650 {
1651 struct sdma_channel *sdmac = to_sdma_chan(chan);
1652 struct sdma_desc *desc;
1653 u32 residue;
1654 struct virt_dma_desc *vd;
1655 enum dma_status ret;
1656 unsigned long flags;
1657
1658 ret = dma_cookie_status(chan, cookie, txstate);
1659 if (ret == DMA_COMPLETE || !txstate)
1660 return ret;
1661
1662 spin_lock_irqsave(&sdmac->vc.lock, flags);
1663 vd = vchan_find_desc(&sdmac->vc, cookie);
1664 if (vd) {
1665 desc = to_sdma_desc(&vd->tx);
1666 if (sdmac->flags & IMX_DMA_SG_LOOP)
1667 residue = (desc->num_bd - desc->buf_ptail) *
1668 desc->period_len - desc->chn_real_count;
1669 else
1670 residue = desc->chn_count - desc->chn_real_count;
1671 } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
1672 residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
1673 } else {
1674 residue = 0;
1675 }
1676 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1677
1678 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1679 residue);
1680
1681 return sdmac->status;
1682 }
1683
1684 static void sdma_issue_pending(struct dma_chan *chan)
1685 {
1686 struct sdma_channel *sdmac = to_sdma_chan(chan);
1687 unsigned long flags;
1688
1689 spin_lock_irqsave(&sdmac->vc.lock, flags);
1690 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1691 sdma_start_desc(sdmac);
1692 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1693 }
1694
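/*
 * Number of struct sdma_script_start_addrs entries understood for
 * firmware header version_major 1..4, see sdma_load_firmware().
 */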
1695 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1696 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
1697 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
1698 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
1699
1700 static void sdma_add_scripts(struct sdma_engine *sdma,
1701 const struct sdma_script_start_addrs *addr)
1702 {
1703 s32 *addr_arr = (u32 *)addr;
1704 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1705 int i;
1706
1707 /* default to the v1 script address layout when no firmware has set one */
1708 if (!sdma->script_number)
1709 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1710
1711 if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1712 / sizeof(s32)) {
1713 dev_err(sdma->dev,
1714 "SDMA script number %d not match with firmware.\n",
1715 sdma->script_number);
1716 return;
1717 }
1718
1719 for (i = 0; i < sdma->script_number; i++)
1720 if (addr_arr[i] > 0)
1721 saddr_arr[i] = addr_arr[i];
1722 }
1723
1724 static void sdma_load_firmware(const struct firmware *fw, void *context)
1725 {
1726 struct sdma_engine *sdma = context;
1727 const struct sdma_firmware_header *header;
1728 const struct sdma_script_start_addrs *addr;
1729 unsigned short *ram_code;
1730
1731 if (!fw) {
1732 dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1733
1734 return;
1735 }
1736
1737 if (fw->size < sizeof(*header))
1738 goto err_firmware;
1739
1740 header = (struct sdma_firmware_header *)fw->data;
1741
1742 if (header->magic != SDMA_FIRMWARE_MAGIC)
1743 goto err_firmware;
1744 if (header->ram_code_start + header->ram_code_size > fw->size)
1745 goto err_firmware;
1746 switch (header->version_major) {
1747 case 1:
1748 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1749 break;
1750 case 2:
1751 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1752 break;
1753 case 3:
1754 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1755 break;
1756 case 4:
1757 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1758 break;
1759 default:
1760 dev_err(sdma->dev, "unknown firmware version\n");
1761 goto err_firmware;
1762 }
1763
1764 addr = (void *)header + header->script_addrs_start;
1765 ram_code = (void *)header + header->ram_code_start;
1766
1767 clk_enable(sdma->clk_ipg);
1768 clk_enable(sdma->clk_ahb);
1769
1770 sdma_load_script(sdma, ram_code,
1771 header->ram_code_size,
1772 addr->ram_code_start_addr);
1773 clk_disable(sdma->clk_ipg);
1774 clk_disable(sdma->clk_ahb);
1775
1776 sdma_add_scripts(sdma, addr);
1777
1778 dev_info(sdma->dev, "loaded firmware %d.%d\n",
1779 header->version_major,
1780 header->version_minor);
1781
1782 err_firmware:
1783 release_firmware(fw);
1784 }
1785
1786 #define EVENT_REMAP_CELLS 3
1787
1788 static int sdma_event_remap(struct sdma_engine *sdma)
1789 {
1790 struct device_node *np = sdma->dev->of_node;
1791 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1792 struct property *event_remap;
1793 struct regmap *gpr;
1794 char propname[] = "fsl,sdma-event-remap";
1795 u32 reg, val, shift, num_map, i;
1796 int ret = 0;
1797
1798 if (IS_ERR(np) || IS_ERR(gpr_np))
1799 goto out;
1800
1801 event_remap = of_find_property(np, propname, NULL);
1802 num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1803 if (!num_map) {
1804 dev_dbg(sdma->dev, "no event needs to be remapped\n");
1805 goto out;
1806 } else if (num_map % EVENT_REMAP_CELLS) {
1807 dev_err(sdma->dev, "the property %s must modulo %d\n",
1808 propname, EVENT_REMAP_CELLS);
1809 ret = -EINVAL;
1810 goto out;
1811 }
1812
1813 gpr = syscon_node_to_regmap(gpr_np);
1814 if (IS_ERR(gpr)) {
1815 dev_err(sdma->dev, "failed to get gpr regmap\n");
1816 ret = PTR_ERR(gpr);
1817 goto out;
1818 }
1819
1820 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1821 ret = of_property_read_u32_index(np, propname, i, &reg);
1822 if (ret) {
1823 dev_err(sdma->dev, "failed to read property %s index %d\n",
1824 propname, i);
1825 goto out;
1826 }
1827
1828 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1829 if (ret) {
1830 dev_err(sdma->dev, "failed to read property %s index %d\n",
1831 propname, i + 1);
1832 goto out;
1833 }
1834
1835 ret = of_property_read_u32_index(np, propname, i + 2, &val);
1836 if (ret) {
1837 dev_err(sdma->dev, "failed to read property %s index %d\n",
1838 propname, i + 2);
1839 goto out;
1840 }
1841
1842 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1843 }
1844
1845 out:
1846 if (!IS_ERR(gpr_np))
1847 of_node_put(gpr_np);
1848
1849 return ret;
1850 }
1851
1852 static int sdma_get_firmware(struct sdma_engine *sdma,
1853 const char *fw_name)
1854 {
1855 int ret;
1856
1857 ret = request_firmware_nowait(THIS_MODULE,
1858 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1859 GFP_KERNEL, sdma, sdma_load_firmware);
1860
1861 return ret;
1862 }
1863
1864 static int sdma_init(struct sdma_engine *sdma)
1865 {
1866 int i, ret;
1867 dma_addr_t ccb_phys;
1868
1869 ret = clk_enable(sdma->clk_ipg);
1870 if (ret)
1871 return ret;
1872 ret = clk_enable(sdma->clk_ahb);
1873 if (ret)
1874 goto disable_clk_ipg;
1875
1876 if (sdma->drvdata->check_ratio &&
1877 (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
1878 sdma->clk_ratio = 1;
1879
1880 /* Be sure SDMA has not started yet */
1881 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1882
1883 sdma->channel_control = dma_alloc_coherent(sdma->dev,
1884 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1885 sizeof(struct sdma_context_data),
1886 &ccb_phys, GFP_KERNEL);
1887
1888 if (!sdma->channel_control) {
1889 ret = -ENOMEM;
1890 goto err_dma_alloc;
1891 }
1892
1893 sdma->context = (void *)sdma->channel_control +
1894 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1895 sdma->context_phys = ccb_phys +
1896 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1897
1898 /* disable all channels */
1899 for (i = 0; i < sdma->drvdata->num_events; i++)
1900 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1901
1902 /* All channels have priority 0 */
1903 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1904 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1905
1906 ret = sdma_request_channel0(sdma);
1907 if (ret)
1908 goto err_dma_alloc;
1909
1910 sdma_config_ownership(&sdma->channel[0], false, true, false);
1911
1912 /* Set Command Channel (Channel Zero) */
1913 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1914
1915 /* Set bits of CONFIG register, still with static context switching */
1916 if (sdma->clk_ratio)
1917 writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
1918 else
1919 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1920
1921 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1922
1923 /* Initialize channel 0 priority */
1924 sdma_set_channel_priority(&sdma->channel[0], 7);
1925
1926 clk_disable(sdma->clk_ipg);
1927 clk_disable(sdma->clk_ahb);
1928
1929 return 0;
1930
1931 err_dma_alloc:
1932 clk_disable(sdma->clk_ahb);
1933 disable_clk_ipg:
1934 clk_disable(sdma->clk_ipg);
1935 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1936 return ret;
1937 }
1938
1939 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1940 {
1941 struct sdma_channel *sdmac = to_sdma_chan(chan);
1942 struct imx_dma_data *data = fn_param;
1943
1944 if (!imx_dma_is_general_purpose(chan))
1945 return false;
1946
1947 sdmac->data = *data;
1948 chan->private = &sdmac->data;
1949
1950 return true;
1951 }
1952
1953 static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1954 struct of_dma *ofdma)
1955 {
1956 struct sdma_engine *sdma = ofdma->of_dma_data;
1957 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1958 struct imx_dma_data data;
1959
1960 if (dma_spec->args_count != 3)
1961 return NULL;
1962
1963 data.dma_request = dma_spec->args[0];
1964 data.peripheral_type = dma_spec->args[1];
1965 data.priority = dma_spec->args[2];
1966
1967 /*
1968  * The devicetree cells describe only one DMA request line;
1969  * dma_request2 is not part of the binding, so clear it explicitly
1970  * instead of leaving stack garbage in it.
1971  */
1972
1973 data.dma_request2 = 0;
1974
1975 return __dma_request_channel(&mask, sdma_filter_fn, &data,
1976 ofdma->of_node);
1977 }
1978
1979 static int sdma_probe(struct platform_device *pdev)
1980 {
1981 const struct of_device_id *of_id =
1982 of_match_device(sdma_dt_ids, &pdev->dev);
1983 struct device_node *np = pdev->dev.of_node;
1984 struct device_node *spba_bus;
1985 const char *fw_name;
1986 int ret;
1987 int irq;
1988 struct resource *iores;
1989 struct resource spba_res;
1990 struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1991 int i;
1992 struct sdma_engine *sdma;
1993 s32 *saddr_arr;
1994 const struct sdma_driver_data *drvdata = NULL;
1995
1996 if (of_id)
1997 drvdata = of_id->data;
1998 else if (pdev->id_entry)
1999 drvdata = (void *)pdev->id_entry->driver_data;
2000
2001 if (!drvdata) {
2002 dev_err(&pdev->dev, "unable to find driver data\n");
2003 return -EINVAL;
2004 }
2005
2006 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2007 if (ret)
2008 return ret;
2009
2010 sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2011 if (!sdma)
2012 return -ENOMEM;
2013
2014 spin_lock_init(&sdma->channel_0_lock);
2015
2016 sdma->dev = &pdev->dev;
2017 sdma->drvdata = drvdata;
2018
2019 irq = platform_get_irq(pdev, 0);
2020 if (irq < 0)
2021 return irq;
2022
2023 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2024 sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2025 if (IS_ERR(sdma->regs))
2026 return PTR_ERR(sdma->regs);
2027
2028 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2029 if (IS_ERR(sdma->clk_ipg))
2030 return PTR_ERR(sdma->clk_ipg);
2031
2032 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2033 if (IS_ERR(sdma->clk_ahb))
2034 return PTR_ERR(sdma->clk_ahb);
2035
2036 ret = clk_prepare(sdma->clk_ipg);
2037 if (ret)
2038 return ret;
2039
2040 ret = clk_prepare(sdma->clk_ahb);
2041 if (ret)
2042 goto err_clk;
2043
2044 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
2045 sdma);
2046 if (ret)
2047 goto err_irq;
2048
2049 sdma->irq = irq;
2050
2051 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2052 if (!sdma->script_addrs) {
2053 ret = -ENOMEM;
2054 goto err_irq;
2055 }
2056
2057
2058 saddr_arr = (s32 *)sdma->script_addrs;
2059 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
2060 saddr_arr[i] = -EINVAL;
2061
2062 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2063 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2064 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2065
2066 INIT_LIST_HEAD(&sdma->dma_device.channels);
2067
2068 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2069 struct sdma_channel *sdmac = &sdma->channel[i];
2070
2071 sdmac->sdma = sdma;
2072
2073 sdmac->channel = i;
2074 sdmac->vc.desc_free = sdma_desc_free;
2075 INIT_WORK(&sdmac->terminate_worker,
2076 sdma_channel_terminate_work);
2077 /*
2078  * Add the channel to the DMAC list. Do not add channel 0 though
2079  * because we need it internally in the SDMA driver. This also means
2080  * that channel 0 in dmaengine counting matches sdma channel 1.
2081  */
2082 if (i)
2083 vchan_init(&sdmac->vc, &sdma->dma_device);
2084 }
2085
2086 ret = sdma_init(sdma);
2087 if (ret)
2088 goto err_init;
2089
2090 ret = sdma_event_remap(sdma);
2091 if (ret)
2092 goto err_init;
2093
2094 if (sdma->drvdata->script_addrs)
2095 sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2096 if (pdata && pdata->script_addrs)
2097 sdma_add_scripts(sdma, pdata->script_addrs);
2098
2099 sdma->dma_device.dev = &pdev->dev;
2100
2101 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2102 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2103 sdma->dma_device.device_tx_status = sdma_tx_status;
2104 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2105 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2106 sdma->dma_device.device_config = sdma_config;
2107 sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
2108 sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2109 sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2110 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2111 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2112 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2113 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2114 sdma->dma_device.device_issue_pending = sdma_issue_pending;
2115 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
2116 sdma->dma_device.copy_align = 2;
2117 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2118
2119 platform_set_drvdata(pdev, sdma);
2120
2121 ret = dma_async_device_register(&sdma->dma_device);
2122 if (ret) {
2123 dev_err(&pdev->dev, "unable to register\n");
2124 goto err_init;
2125 }
2126
2127 if (np) {
2128 ret = of_dma_controller_register(np, sdma_xlate, sdma);
2129 if (ret) {
2130 dev_err(&pdev->dev, "failed to register controller\n");
2131 goto err_register;
2132 }
2133
2134 spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2135 ret = of_address_to_resource(spba_bus, 0, &spba_res);
2136 if (!ret) {
2137 sdma->spba_start_addr = spba_res.start;
2138 sdma->spba_end_addr = spba_res.end;
2139 }
2140 of_node_put(spba_bus);
2141 }
2142
2143 /*
2144  * Kick off firmware loading as the very last step. The request is
2145  * asynchronous and a missing external firmware image is not fatal:
2146  * the scripts in the on-chip ROM are used instead.
2147  */
2148
2149 if (pdata) {
2150 ret = sdma_get_firmware(sdma, pdata->fw_name);
2151 if (ret)
2152 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2153 } else {
2154 /*
2155  * In the device tree case the firmware name comes from the
2156  * "fsl,sdma-ram-script-name" property; without it only the ROM
2157  * scripts known to the driver are available.
2158  */
2159 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2160 &fw_name);
2161 if (ret) {
2162 dev_warn(&pdev->dev, "failed to get firmware name\n");
2163 } else {
2164 ret = sdma_get_firmware(sdma, fw_name);
2165 if (ret)
2166 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2167 }
2168 }
2169
2170 return 0;
2171
2172 err_register:
2173 dma_async_device_unregister(&sdma->dma_device);
2174 err_init:
2175 kfree(sdma->script_addrs);
2176 err_irq:
2177 clk_unprepare(sdma->clk_ahb);
2178 err_clk:
2179 clk_unprepare(sdma->clk_ipg);
2180 return ret;
2181 }
2182
2183 static int sdma_remove(struct platform_device *pdev)
2184 {
2185 struct sdma_engine *sdma = platform_get_drvdata(pdev);
2186 int i;
2187
2188 devm_free_irq(&pdev->dev, sdma->irq, sdma);
2189 dma_async_device_unregister(&sdma->dma_device);
2190 kfree(sdma->script_addrs);
2191 clk_unprepare(sdma->clk_ahb);
2192 clk_unprepare(sdma->clk_ipg);
2193
2194 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2195 struct sdma_channel *sdmac = &sdma->channel[i];
2196
2197 tasklet_kill(&sdmac->vc.task);
2198 sdma_free_chan_resources(&sdmac->vc.chan);
2199 }
2200
2201 platform_set_drvdata(pdev, NULL);
2202 return 0;
2203 }
2204
2205 static struct platform_driver sdma_driver = {
2206 .driver = {
2207 .name = "imx-sdma",
2208 .of_match_table = sdma_dt_ids,
2209 },
2210 .id_table = sdma_devtypes,
2211 .remove = sdma_remove,
2212 .probe = sdma_probe,
2213 };
2214
2215 module_platform_driver(sdma_driver);
2216
2217 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2218 MODULE_DESCRIPTION("i.MX SDMA driver");
2219 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
2220 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2221 #endif
2222 #if IS_ENABLED(CONFIG_SOC_IMX7D)
2223 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2224 #endif
2225 MODULE_LICENSE("GPL");