This source file includes the following definitions:
- chan2dev
- chan_is_physical
- chan_is_logical
- chan_base
- d40_pool_lli_alloc
- d40_pool_lli_free
- d40_lcla_alloc_one
- d40_lcla_free_all
- d40_desc_remove
- d40_desc_get
- d40_desc_free
- d40_desc_submit
- d40_phy_lli_load
- d40_desc_done
- d40_log_lli_to_lcxa
- d40_desc_load
- d40_first_active_get
- d40_desc_queue
- d40_first_pending
- d40_first_queued
- d40_first_done
- d40_psize_2_burst_size
- d40_size_2_dmalen
- d40_sg_2_dmalen
- __d40_execute_command_phy
- d40_term_all
- __d40_config_set_event
- d40_config_set_event
- d40_chan_has_events
- __d40_execute_command_log
- d40_channel_execute_command
- d40_get_prmo
- d40_config_write
- d40_residue
- d40_tx_is_linked
- d40_pause
- d40_resume
- d40_tx_submit
- d40_start
- d40_queue_start
- dma_tc_handle
- dma_tasklet
- d40_handle_interrupt
- d40_validate_conf
- d40_alloc_mask_set
- d40_alloc_mask_free
- d40_allocate_channel
- d40_config_memcpy
- d40_free_dma
- d40_is_paused
- stedma40_residue
- d40_prep_sg_log
- d40_prep_sg_phy
- d40_prep_desc
- d40_prep_sg
- stedma40_filter
- __d40_set_prio_rt
- d40_set_prio_realtime
- d40_xlate
- d40_alloc_chan_resources
- d40_free_chan_resources
- d40_prep_memcpy
- d40_prep_slave_sg
- dma40_prep_dma_cyclic
- d40_tx_status
- d40_issue_pending
- d40_terminate_all
- dma40_config_to_halfchannel
- d40_set_runtime_config
- d40_set_runtime_config_write
- d40_chan_init
- d40_ops_init
- d40_dmaengine_init
- dma40_suspend
- dma40_resume
- dma40_backup
- d40_save_restore_registers
- dma40_runtime_suspend
- dma40_runtime_resume
- d40_phy_res_init
- d40_hw_detect_init
- d40_hw_init
- d40_lcla_allocate
- d40_of_probe
- d40_probe
- stedma40_init
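For orientation before the listing: a dmaengine client on a board-file platform would normally pair stedma40_filter() (exported below) with the generic dmaengine slave API, while device-tree clients go through d40_xlate(). The snippet below is only a minimal sketch; the event line number, FIFO address and buffer handle are placeholders and do not come from this file:

	struct stedma40_chan_cfg cfg = {
		.dir = DMA_DEV_TO_MEM,
		.dev_type = 14,		/* placeholder event line */
		.mode = STEDMA40_MODE_LOGICAL,
	};
	struct dma_slave_config slave_cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,	/* placeholder device FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, stedma40_filter, &cfg);
	if (!chan)
		return -ENODEV;
	dmaengine_slave_config(chan, &slave_cfg);
	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);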
1
2
3
4
5
6
7
8
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/log2.h>
18 #include <linux/pm.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/err.h>
21 #include <linux/of.h>
22 #include <linux/of_dma.h>
23 #include <linux/amba/bus.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/platform_data/dma-ste-dma40.h>
26
27 #include "dmaengine.h"
28 #include "ste_dma40_ll.h"
29
30 #define D40_NAME "dma40"
31
32 #define D40_PHY_CHAN -1
33
34 /* For masking out/in the 2-bit channel position in the status registers */
35 #define D40_CHAN_POS(chan) (2 * (chan / 2))
36 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
37
38 /* Maximum iterations to wait when suspending a channel before giving up */
39 #define D40_SUSPEND_MAX_IT 500
40
41 /* Runtime PM autosuspend delay, in milliseconds */
42 #define DMA40_AUTOSUSPEND_DELAY 100
43
44 /* Hardware alignment requirement for the LCLA area */
45 #define LCLA_ALIGNMENT 0x40000
46
47 /* Maximum number of LCLA links per event group */
48 #define D40_LCLA_LINK_PER_EVENT_GRP 128
49 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
50
51 /* Maximum number of logical channels per physical channel */
52 #define D40_MAX_LOG_CHAN_PER_PHY 32
53
54 /* Attempts made to allocate a correctly aligned LCLA area before giving up */
55 #define MAX_LCLA_ALLOC_ATTEMPTS 256
56
57 /* Bit markings for the allocation maps (allocated_src/allocated_dst) */
58 #define D40_ALLOC_FREE BIT(31)
59 #define D40_ALLOC_PHY BIT(30)
60 #define D40_ALLOC_LOG_FREE 0
61
62 #define D40_MEMCPY_MAX_CHANS 8
63
64 /* DB8500 event lines reserved for memcpy-only channels */
65 #define DB8500_DMA_MEMCPY_EV_0 51
66 #define DB8500_DMA_MEMCPY_EV_1 56
67 #define DB8500_DMA_MEMCPY_EV_2 57
68 #define DB8500_DMA_MEMCPY_EV_3 58
69 #define DB8500_DMA_MEMCPY_EV_4 59
70 #define DB8500_DMA_MEMCPY_EV_5 60
71
72 static int dma40_memcpy_channels[] = {
73 DB8500_DMA_MEMCPY_EV_0,
74 DB8500_DMA_MEMCPY_EV_1,
75 DB8500_DMA_MEMCPY_EV_2,
76 DB8500_DMA_MEMCPY_EV_3,
77 DB8500_DMA_MEMCPY_EV_4,
78 DB8500_DMA_MEMCPY_EV_5,
79 };
80
81 /* Default channel configuration for physical-mode memcpy */
82 static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
83 .mode = STEDMA40_MODE_PHYSICAL,
84 .dir = DMA_MEM_TO_MEM,
85
86 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
87 .src_info.psize = STEDMA40_PSIZE_PHY_1,
88 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
89
90 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
91 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
92 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
93 };
94
95 /* Default channel configuration for logical-mode memcpy */
96 static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
97 .mode = STEDMA40_MODE_LOGICAL,
98 .dir = DMA_MEM_TO_MEM,
99
100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
101 .src_info.psize = STEDMA40_PSIZE_LOG_1,
102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
103
104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
105 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
107 };
108
109 /*
110  * enum d40_command - The different commands and/or statuses.
111  *
112  * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
113  * @D40_DMA_RUN: DMA channel command RUN or status RUNNING.
114  * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
115  * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
116  */
117 enum d40_command {
118 D40_DMA_STOP = 0,
119 D40_DMA_RUN = 1,
120 D40_DMA_SUSPEND_REQ = 2,
121 D40_DMA_SUSPENDED = 3
122 };
123
124
125 /*
126  * enum d40_events - The different event enables for the event lines.
127  *
128  * @D40_DEACTIVATE_EVENTLINE: De-activate the event line, stopping the logical channel.
129  * @D40_ACTIVATE_EVENTLINE: Activate the event line, starting the logical channel.
130  * @D40_SUSPEND_REQ_EVENTLINE: Request suspension of an event line.
131  * @D40_ROUND_EVENTLINE: Status check for an event line.
132  */
133 enum d40_events {
134 D40_DEACTIVATE_EVENTLINE = 0,
135 D40_ACTIVATE_EVENTLINE = 1,
136 D40_SUSPEND_REQ_EVENTLINE = 2,
137 D40_ROUND_EVENTLINE = 3
138 };
139
140
141 /*
142  * These are the registers that need to be saved and later restored
143  * on suspend and resume.
144  */
145 static __maybe_unused u32 d40_backup_regs[] = {
146 D40_DREG_LCPA,
147 D40_DREG_LCLA,
148 D40_DREG_PRMSE,
149 D40_DREG_PRMSO,
150 D40_DREG_PRMOE,
151 D40_DREG_PRMOO,
152 };
153
154 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
155
156
157
158
159
160
161
162
163
164
165
166
167
168 static u32 d40_backup_regs_v4a[] = {
169 D40_DREG_PSEG1,
170 D40_DREG_PSEG2,
171 D40_DREG_PSEG3,
172 D40_DREG_PSEG4,
173 D40_DREG_PCEG1,
174 D40_DREG_PCEG2,
175 D40_DREG_PCEG3,
176 D40_DREG_PCEG4,
177 D40_DREG_RSEG1,
178 D40_DREG_RSEG2,
179 D40_DREG_RSEG3,
180 D40_DREG_RSEG4,
181 D40_DREG_RCEG1,
182 D40_DREG_RCEG2,
183 D40_DREG_RCEG3,
184 D40_DREG_RCEG4,
185 };
186
187 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
188
189 static u32 d40_backup_regs_v4b[] = {
190 D40_DREG_CPSEG1,
191 D40_DREG_CPSEG2,
192 D40_DREG_CPSEG3,
193 D40_DREG_CPSEG4,
194 D40_DREG_CPSEG5,
195 D40_DREG_CPCEG1,
196 D40_DREG_CPCEG2,
197 D40_DREG_CPCEG3,
198 D40_DREG_CPCEG4,
199 D40_DREG_CPCEG5,
200 D40_DREG_CRSEG1,
201 D40_DREG_CRSEG2,
202 D40_DREG_CRSEG3,
203 D40_DREG_CRSEG4,
204 D40_DREG_CRSEG5,
205 D40_DREG_CRCEG1,
206 D40_DREG_CRCEG2,
207 D40_DREG_CRCEG3,
208 D40_DREG_CRCEG4,
209 D40_DREG_CRCEG5,
210 };
211
212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
213
214 static __maybe_unused u32 d40_backup_regs_chan[] = {
215 D40_CHAN_REG_SSCFG,
216 D40_CHAN_REG_SSELT,
217 D40_CHAN_REG_SSPTR,
218 D40_CHAN_REG_SSLNK,
219 D40_CHAN_REG_SDCFG,
220 D40_CHAN_REG_SDELT,
221 D40_CHAN_REG_SDPTR,
222 D40_CHAN_REG_SDLNK,
223 };
224
225 #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
227
228 /**
229  * struct d40_interrupt_lookup - lookup table for the interrupt handler
230  *
231  * @src: Interrupt status register.
232  * @clr: Interrupt clear register.
233  * @is_error: true if this entry describes an error interrupt.
234  * @offset: start delta into lookup_log_chans in d40_base. If equal to
235  * D40_PHY_CHAN, lookup_phy_chans is used instead.
236  */
237 struct d40_interrupt_lookup {
238 u32 src;
239 u32 clr;
240 bool is_error;
241 int offset;
242 };
243
244
245 static struct d40_interrupt_lookup il_v4a[] = {
246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
256 };
257
258 static struct d40_interrupt_lookup il_v4b[] = {
259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
271 };
272
273 /*
274  * struct d40_reg_val - simple register/value pair
275  *
276  * @reg: The register offset.
277  * @val: The value to be written to the register in reg.
278  */
279 struct d40_reg_val {
280 unsigned int reg;
281 unsigned int val;
282 };
283
284 static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
285
286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
287
288
289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
301 };
302 static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
303
304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
305
306
307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
322 };
323
324 /**
325  * struct d40_lli_pool - Structure for keeping LLIs in memory
326  *
327  * @base: Pointer to memory area when lli_len > 1, i.e. when the
328  * pre-allocated area pre_alloc_lli is not used.
329  * @size: The size in bytes of the memory at base, or of pre_alloc_lli.
330  * @dma_addr: DMA address of the LLI area, if it is mapped.
331  * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
332  * one buffer to one buffer.
333  */
334
335 struct d40_lli_pool {
336 void *base;
337 int size;
338 dma_addr_t dma_addr;
339
340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
341 };
342
343 /**
344  * struct d40_desc - A descriptor is one DMA job.
345  *
346  * @lli_phy: LLI settings for a physical channel. Both src and dst
347  * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
348  * lli_len equals one.
349  * @lli_log: Same as above but for logical channels.
350  * @lli_pool: The pool holding the LLIs, with a small pre-allocated area.
351  * @lli_len: Number of llis of the current descriptor.
352  * @lli_current: Number of transferred llis.
353  * @lcla_alloc: Number of LCLA entries allocated.
354  * @txd: DMA engine struct. Used among other things for communication
355  * during a transfer.
356  * @node: List entry.
357  * @is_in_client_list: true if the descriptor is in the client list.
358  * @cyclic: true if this is a cyclic job.
359  *
360  * This descriptor is used for both logical and physical transfers.
361  */
362 struct d40_desc {
363
364 struct d40_phy_lli_bidir lli_phy;
365
366 struct d40_log_lli_bidir lli_log;
367
368 struct d40_lli_pool lli_pool;
369 int lli_len;
370 int lli_current;
371 int lcla_alloc;
372
373 struct dma_async_tx_descriptor txd;
374 struct list_head node;
375
376 bool is_in_client_list;
377 bool cyclic;
378 };
379
380 /**
381  * struct d40_lcla_pool - LCLA pool settings and data.
382  *
383  * @base: The virtual address of the LCLA area, aligned to LCLA_ALIGNMENT.
384  * @dma_addr: DMA address of the LCLA area, if it is mapped.
385  * @base_unaligned: The original kmalloc pointer, kept only for clean-up.
386  * @pages: The number of pages needed for all physical channels.
387  * Only used for clean-up on error.
388  * @lock: Lock to protect the content in this struct.
389  * @alloc_map: Map of which LCLA entry is owned by which job.
390  */
391 struct d40_lcla_pool {
392 void *base;
393 dma_addr_t dma_addr;
394 void *base_unaligned;
395 int pages;
396 spinlock_t lock;
397 struct d40_desc **alloc_map;
398 };
399
400 /**
401  * struct d40_phy_res - struct for handling event lines mapped to physical
402  * channels.
403  *
404  * @lock: A lock protecting this entity.
405  * @reserved: True if the channel is reserved and not available to this driver.
406  * @num: The physical channel number of this entity.
407  * @allocated_src: Bit map showing which src event lines are mapped to
408  * this physical channel. Can also be free or physically allocated.
409  * @allocated_dst: Same as for src but for the dst side.
410  * allocated_src and allocated_dst use the D40_ALLOC* defines as well as
411  * the event line numbers.
412  * @use_soft_lli: True if this channel's linked lists are managed in software
413  */
414 struct d40_phy_res {
415 spinlock_t lock;
416 bool reserved;
417 int num;
418 u32 allocated_src;
419 u32 allocated_dst;
420 bool use_soft_lli;
421 };
422
423 struct d40_base;
424
425 /**
426  * struct d40_chan - Struct that describes a channel.
427  *
428  * @lock: A spinlock to protect this struct.
429  * @log_num: The logical number of this channel, if any.
430  * @pending_tx: The number of pending transfers. Used between interrupt handler
431  * and tasklet.
432  * @busy: Set to true when a transfer is ongoing on this channel.
433  * @phy_chan: Pointer to the physical channel this instance runs on. If this
434  * pointer is NULL, the channel is not allocated.
435  * @chan: DMA engine handle.
436  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
437  * transfer and call the client callback.
438  * @client: Client owned descriptor list.
439  * @pending_queue: Submitted jobs, to be issued by issue_pending().
440  * @active: Active descriptors.
441  * @done: Completed jobs.
442  * @queue: Queued jobs.
443  * @prepare_queue: Prepared jobs.
444  * @dma_cfg: The client configuration of this dma channel.
445  * @slave_config: DMA slave configuration.
446  * @configured: whether the dma_cfg configuration is valid.
447  * @base: Pointer to the device instance struct.
448  * @src_def_cfg: Default cfg register setting for src.
449  * @dst_def_cfg: Default cfg register setting for dst.
450  * @log_def: Default logical channel settings.
451  * @lcpa: Pointer to this channel's dst and src slots in the LCPA area.
452  * @runtime_addr: runtime configured address.
453  * @runtime_direction: runtime configured direction.
454  *
455  * This struct can either "be" a logical or a physical channel.
456  */
457 struct d40_chan {
458 spinlock_t lock;
459 int log_num;
460 int pending_tx;
461 bool busy;
462 struct d40_phy_res *phy_chan;
463 struct dma_chan chan;
464 struct tasklet_struct tasklet;
465 struct list_head client;
466 struct list_head pending_queue;
467 struct list_head active;
468 struct list_head done;
469 struct list_head queue;
470 struct list_head prepare_queue;
471 struct stedma40_chan_cfg dma_cfg;
472 struct dma_slave_config slave_config;
473 bool configured;
474 struct d40_base *base;
475
476 u32 src_def_cfg;
477 u32 dst_def_cfg;
478 struct d40_def_lcsp log_def;
479 struct d40_log_lli_full *lcpa;
480
481 dma_addr_t runtime_addr;
482 enum dma_transfer_direction runtime_direction;
483 };
484
485 /**
486  * struct d40_gen_dmac - generic register values that differ between the
487  * two register layouts (v4a and v4b) of the controller.
488  *
489  * @backup: pointer to the array of register addresses to back up on suspend
490  * @backup_size: number of entries in the backup address array
491  * @realtime_en: the realtime enable register
492  * @realtime_clear: the realtime clear register
493  * @high_prio_en: the high priority enable register
494  * @high_prio_clear: the high priority clear register
495  * @interrupt_en: the interrupt enable register
496  * @interrupt_clear: the interrupt clear register
497  * @il: pointer to the interrupt lookup table for this variant
498  * @il_size: number of entries in the interrupt lookup table
499  * @init_reg: pointer to the register init table written at probe
500  * @init_reg_size: number of entries in the init register table
501  */
502 struct d40_gen_dmac {
503 u32 *backup;
504 u32 backup_size;
505 u32 realtime_en;
506 u32 realtime_clear;
507 u32 high_prio_en;
508 u32 high_prio_clear;
509 u32 interrupt_en;
510 u32 interrupt_clear;
511 struct d40_interrupt_lookup *il;
512 u32 il_size;
513 struct d40_reg_val *init_reg;
514 u32 init_reg_size;
515 };
516
517 /**
518  * struct d40_base - The big global struct, one for each probe'd instance.
519  *
520  * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
521  * @execmd_lock: Lock for execute command usage since several channels share
522  * the same physical register.
523  * @dev: The device structure.
524  * @virtbase: The virtual base address of the DMA's registers.
525  * @rev: silicon revision detected.
526  * @clk: Pointer to the DMA clock structure.
527  * @phy_start: Physical memory start of the DMA registers.
528  * @phy_size: Size of the DMA register map.
529  * @irq: The IRQ number.
530  * @num_memcpy_chans: The number of channels used for memcpy.
531  * @num_phy_chans: The number of physical channels. Read from HW. This
532  * is the number of available channels for this driver, not counting "Secure
533  * mode" allocated physical channels.
534  * @num_log_chans: The number of logical channels. Calculated from
535  * num_phy_chans.
536  * @dma_parms: DMA parameters for the device.
537  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
538  * @dma_slave: dma_device channels that can only do slave transfers.
539  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
540  * @phy_chans: Room for all possible physical channels in the system.
541  * @log_chans: Room for all possible logical channels in the system.
542  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
543  * to log_chans entries.
544  * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
545  * to phy_chans entries.
546  * @plat_data: Pointer to the provided platform_data which is the driver
547  * configuration.
548  * @lcpa_regulator: Regulator for the esram bank holding the LCLA, if used.
549  * @phy_res: Vector containing all physical channels.
550  * @lcla_pool: LCLA pool settings and data.
551  * @lcpa_base: The virtual mapped address of LCPA.
552  * @phy_lcpa: The physical address of the LCPA.
553  * @lcpa_size: The size of the LCPA area.
554  * @desc_slab: cache for descriptors.
555  * @reg_val_backup: Here the values of some hardware registers are stored
556  * before the DMA is powered off. They are restored when the power is back on.
557  * @reg_val_backup_v4: Backup of registers specific to the v4a/v4b register
558  * layout of this controller variant.
559  * @reg_val_backup_chan: Backup data for standard channel parameter registers.
560  * @regs_interrupt: Scratch space for reading the interrupt status registers.
561  * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
562  * @gen_dmac: Generic register values and lookup tables for this controller
563  * variant (v4a or v4b).
564  */
565 struct d40_base {
566 spinlock_t interrupt_lock;
567 spinlock_t execmd_lock;
568 struct device *dev;
569 void __iomem *virtbase;
570 u8 rev:4;
571 struct clk *clk;
572 phys_addr_t phy_start;
573 resource_size_t phy_size;
574 int irq;
575 int num_memcpy_chans;
576 int num_phy_chans;
577 int num_log_chans;
578 struct device_dma_parameters dma_parms;
579 struct dma_device dma_both;
580 struct dma_device dma_slave;
581 struct dma_device dma_memcpy;
582 struct d40_chan *phy_chans;
583 struct d40_chan *log_chans;
584 struct d40_chan **lookup_log_chans;
585 struct d40_chan **lookup_phy_chans;
586 struct stedma40_platform_data *plat_data;
587 struct regulator *lcpa_regulator;
588
589 struct d40_phy_res *phy_res;
590 struct d40_lcla_pool lcla_pool;
591 void *lcpa_base;
592 dma_addr_t phy_lcpa;
593 resource_size_t lcpa_size;
594 struct kmem_cache *desc_slab;
595 u32 reg_val_backup[BACKUP_REGS_SZ];
596 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
597 u32 *reg_val_backup_chan;
598 u32 *regs_interrupt;
599 u16 gcc_pwr_off_mask;
600 struct d40_gen_dmac gen_dmac;
601 };
602
603 static struct device *chan2dev(struct d40_chan *d40c)
604 {
605 return &d40c->chan.dev->device;
606 }
607
608 static bool chan_is_physical(struct d40_chan *chan)
609 {
610 return chan->log_num == D40_PHY_CHAN;
611 }
612
613 static bool chan_is_logical(struct d40_chan *chan)
614 {
615 return !chan_is_physical(chan);
616 }
617
618 static void __iomem *chan_base(struct d40_chan *chan)
619 {
620 return chan->base->virtbase + D40_DREG_PCBASE +
621 chan->phy_chan->num * D40_DREG_PCDELTA;
622 }
623
624 #define d40_err(dev, format, arg...) \
625 dev_err(dev, "[%s] " format, __func__, ## arg)
626
627 #define chan_err(d40c, format, arg...) \
628 d40_err(chan2dev(d40c), format, ## arg)
629
630 static int d40_set_runtime_config_write(struct dma_chan *chan,
631 struct dma_slave_config *config,
632 enum dma_transfer_direction direction);
633
634 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
635 int lli_len)
636 {
637 bool is_log = chan_is_logical(d40c);
638 u32 align;
639 void *base;
640
641 if (is_log)
642 align = sizeof(struct d40_log_lli);
643 else
644 align = sizeof(struct d40_phy_lli);
645
646 if (lli_len == 1) {
647 base = d40d->lli_pool.pre_alloc_lli;
648 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
649 d40d->lli_pool.base = NULL;
650 } else {
651 d40d->lli_pool.size = lli_len * 2 * align;
652
653 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
654 d40d->lli_pool.base = base;
655
656 if (d40d->lli_pool.base == NULL)
657 return -ENOMEM;
658 }
659
660 if (is_log) {
661 d40d->lli_log.src = PTR_ALIGN(base, align);
662 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
663
664 d40d->lli_pool.dma_addr = 0;
665 } else {
666 d40d->lli_phy.src = PTR_ALIGN(base, align);
667 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
668
669 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
670 d40d->lli_phy.src,
671 d40d->lli_pool.size,
672 DMA_TO_DEVICE);
673
674 if (dma_mapping_error(d40c->base->dev,
675 d40d->lli_pool.dma_addr)) {
676 kfree(d40d->lli_pool.base);
677 d40d->lli_pool.base = NULL;
678 d40d->lli_pool.dma_addr = 0;
679 return -ENOMEM;
680 }
681 }
682
683 return 0;
684 }
685
686 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
687 {
688 if (d40d->lli_pool.dma_addr)
689 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
690 d40d->lli_pool.size, DMA_TO_DEVICE);
691
692 kfree(d40d->lli_pool.base);
693 d40d->lli_pool.base = NULL;
694 d40d->lli_pool.size = 0;
695 d40d->lli_log.src = NULL;
696 d40d->lli_log.dst = NULL;
697 d40d->lli_phy.src = NULL;
698 d40d->lli_phy.dst = NULL;
699 }
700
701 static int d40_lcla_alloc_one(struct d40_chan *d40c,
702 struct d40_desc *d40d)
703 {
704 unsigned long flags;
705 int i;
706 int ret = -EINVAL;
707
708 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
709
710 /*
711  * Allocate both src and dst at the same time, therefore the half
712  * starts on 1 since 0 can't be used as it is the end-of-link marker.
713  */
714 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
715 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
716
717 if (!d40c->base->lcla_pool.alloc_map[idx]) {
718 d40c->base->lcla_pool.alloc_map[idx] = d40d;
719 d40d->lcla_alloc++;
720 ret = i;
721 break;
722 }
723 }
724
725 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
726
727 return ret;
728 }
729
730 static int d40_lcla_free_all(struct d40_chan *d40c,
731 struct d40_desc *d40d)
732 {
733 unsigned long flags;
734 int i;
735 int ret = -EINVAL;
736
737 if (chan_is_physical(d40c))
738 return 0;
739
740 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
741
742 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
743 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
744
745 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
746 d40c->base->lcla_pool.alloc_map[idx] = NULL;
747 d40d->lcla_alloc--;
748 if (d40d->lcla_alloc == 0) {
749 ret = 0;
750 break;
751 }
752 }
753 }
754
755 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
756
757 return ret;
758
759 }
760
761 static void d40_desc_remove(struct d40_desc *d40d)
762 {
763 list_del(&d40d->node);
764 }
765
766 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
767 {
768 struct d40_desc *desc = NULL;
769
770 if (!list_empty(&d40c->client)) {
771 struct d40_desc *d;
772 struct d40_desc *_d;
773
774 list_for_each_entry_safe(d, _d, &d40c->client, node) {
775 if (async_tx_test_ack(&d->txd)) {
776 d40_desc_remove(d);
777 desc = d;
778 memset(desc, 0, sizeof(*desc));
779 break;
780 }
781 }
782 }
783
784 if (!desc)
785 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
786
787 if (desc)
788 INIT_LIST_HEAD(&desc->node);
789
790 return desc;
791 }
792
793 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
794 {
795
796 d40_pool_lli_free(d40c, d40d);
797 d40_lcla_free_all(d40c, d40d);
798 kmem_cache_free(d40c->base->desc_slab, d40d);
799 }
800
801 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
802 {
803 list_add_tail(&desc->node, &d40c->active);
804 }
805
806 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
807 {
808 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
809 struct d40_phy_lli *lli_src = desc->lli_phy.src;
810 void __iomem *base = chan_base(chan);
811
812 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
813 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
814 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
815 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
816
817 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
818 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
819 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
820 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
821 }
822
823 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
824 {
825 list_add_tail(&desc->node, &d40c->done);
826 }
827
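/*
 * Load a logical channel's link list: the first link is written directly to
 * the channel's slot in LCPA, and any remaining links are written to LCLA
 * entries allocated from the per-event-group pool. For cyclic transfers the
 * chain is linked back to the first entry so the job can run indefinitely.
 */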
828 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
829 {
830 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
831 struct d40_log_lli_bidir *lli = &desc->lli_log;
832 int lli_current = desc->lli_current;
833 int lli_len = desc->lli_len;
834 bool cyclic = desc->cyclic;
835 int curr_lcla = -EINVAL;
836 int first_lcla = 0;
837 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
838 bool linkback;
839
840 /*
841  * We may have partially running cyclic transfers if we didn't
842  * get enough LCLA entries.
843  */
844 linkback = cyclic && lli_current == 0;
845
846 /*
847  * For linkback, we need one LCLA even with only one link, because we
848  * can't link back to the one in LCPA space.
849  */
850 if (linkback || (lli_len - lli_current > 1)) {
851 /*
852  * If the channel is expected to use only soft_lli don't
853  * allocate a lcla. This is to avoid a HW issue that exists
854  * in some controllers during a peripheral-to-memory transfer
855  * that uses linked lists.
856  */
857 if (!(chan->phy_chan->use_soft_lli &&
858 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
859 curr_lcla = d40_lcla_alloc_one(chan, desc);
860
861 first_lcla = curr_lcla;
862 }
863
864 /*
865  * For linkback, we normally load the LCPA in the loop since we need to
866  * link it to the second LCLA and not the first. However, if we
867  * couldn't even get a first LCLA, then we have to run in LCPA and
868  * link directly to the physical device.
869  */
870 if (!linkback || curr_lcla == -EINVAL) {
871 unsigned int flags = 0;
872
873 if (curr_lcla == -EINVAL)
874 flags |= LLI_TERM_INT;
875
876 d40_log_lli_lcpa_write(chan->lcpa,
877 &lli->dst[lli_current],
878 &lli->src[lli_current],
879 curr_lcla,
880 flags);
881 lli_current++;
882 }
883
884 if (curr_lcla < 0)
885 goto set_current;
886
887 for (; lli_current < lli_len; lli_current++) {
888 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
889 8 * curr_lcla * 2;
890 struct d40_log_lli *lcla = pool->base + lcla_offset;
891 unsigned int flags = 0;
892 int next_lcla;
893
894 if (lli_current + 1 < lli_len)
895 next_lcla = d40_lcla_alloc_one(chan, desc);
896 else
897 next_lcla = linkback ? first_lcla : -EINVAL;
898
899 if (cyclic || next_lcla == -EINVAL)
900 flags |= LLI_TERM_INT;
901
902 if (linkback && curr_lcla == first_lcla) {
903
904 d40_log_lli_lcpa_write(chan->lcpa,
905 &lli->dst[lli_current],
906 &lli->src[lli_current],
907 next_lcla, flags);
908 }
909
910
911
912
913
914 d40_log_lli_lcla_write(lcla,
915 &lli->dst[lli_current],
916 &lli->src[lli_current],
917 next_lcla, flags);
918
919
920
921
922
923 if (!use_esram_lcla) {
924 dma_sync_single_range_for_device(chan->base->dev,
925 pool->dma_addr, lcla_offset,
926 2 * sizeof(struct d40_log_lli),
927 DMA_TO_DEVICE);
928 }
929 curr_lcla = next_lcla;
930
931 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
932 lli_current++;
933 break;
934 }
935 }
936 set_current:
937 desc->lli_current = lli_current;
938 }
939
940 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
941 {
942 if (chan_is_physical(d40c)) {
943 d40_phy_lli_load(d40c, d40d);
944 d40d->lli_current = d40d->lli_len;
945 } else
946 d40_log_lli_to_lcxa(d40c, d40d);
947 }
948
949 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
950 {
951 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
952 }
953
954
955 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
956 {
957 d40_desc_remove(desc);
958 desc->is_in_client_list = false;
959 list_add_tail(&desc->node, &d40c->pending_queue);
960 }
961
962 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
963 {
964 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
965 node);
966 }
967
968 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
969 {
970 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
971 }
972
973 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
974 {
975 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
976 }
977
978 static int d40_psize_2_burst_size(bool is_log, int psize)
979 {
980 if (is_log) {
981 if (psize == STEDMA40_PSIZE_LOG_1)
982 return 1;
983 } else {
984 if (psize == STEDMA40_PSIZE_PHY_1)
985 return 1;
986 }
987
988 return 2 << psize;
989 }
990
991 /*
992  * The hardware can move at most STEDMA40_MAX_SEG_SIZE * data_width bytes
993  * in a single link. Calculate how many dma elements (links) are needed to
994  * transfer "size" bytes, given the source and destination data widths;
995  * returns -EINVAL if size is not aligned to the larger of the two widths.
996  */
997 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
998 {
999 int dmalen;
1000 u32 max_w = max(data_width1, data_width2);
1001 u32 min_w = min(data_width1, data_width2);
1002 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
1003
1004 if (seg_max > STEDMA40_MAX_SEG_SIZE)
1005 seg_max -= max_w;
1006
1007 if (!IS_ALIGNED(size, max_w))
1008 return -EINVAL;
1009
1010 if (size <= seg_max)
1011 dmalen = 1;
1012 else {
1013 dmalen = size / seg_max;
1014 if (dmalen * seg_max < size)
1015 dmalen++;
1016 }
1017 return dmalen;
1018 }
1019
1020 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1021 u32 data_width1, u32 data_width2)
1022 {
1023 struct scatterlist *sg;
1024 int i;
1025 int len = 0;
1026 int ret;
1027
1028 for_each_sg(sgl, sg, sg_len, i) {
1029 ret = d40_size_2_dmalen(sg_dma_len(sg),
1030 data_width1, data_width2);
1031 if (ret < 0)
1032 return ret;
1033 len += ret;
1034 }
1035 return len;
1036 }
1037
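/*
 * Issue a command (RUN/STOP/SUSPEND_REQ) for a physical channel by writing
 * its 2-bit field in the shared ACTIVE/ACTIVO register. A STOP is turned
 * into a suspend request first, and suspend requests are polled for up to
 * D40_SUSPEND_MAX_IT iterations before giving up.
 */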
1038 static int __d40_execute_command_phy(struct d40_chan *d40c,
1039 enum d40_command command)
1040 {
1041 u32 status;
1042 int i;
1043 void __iomem *active_reg;
1044 int ret = 0;
1045 unsigned long flags;
1046 u32 wmask;
1047
1048 if (command == D40_DMA_STOP) {
1049 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1050 if (ret)
1051 return ret;
1052 }
1053
1054 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1055
1056 if (d40c->phy_chan->num % 2 == 0)
1057 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1058 else
1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1060
1061 if (command == D40_DMA_SUSPEND_REQ) {
1062 status = (readl(active_reg) &
1063 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1064 D40_CHAN_POS(d40c->phy_chan->num);
1065
1066 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1067 goto unlock;
1068 }
1069
1070 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1071 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1072 active_reg);
1073
1074 if (command == D40_DMA_SUSPEND_REQ) {
1075
1076 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1077 status = (readl(active_reg) &
1078 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1079 D40_CHAN_POS(d40c->phy_chan->num);
1080
1081 cpu_relax();
1082
1083
1084
1085
1086 udelay(3);
1087
1088 if (status == D40_DMA_STOP ||
1089 status == D40_DMA_SUSPENDED)
1090 break;
1091 }
1092
1093 if (i == D40_SUSPEND_MAX_IT) {
1094 chan_err(d40c,
1095 "unable to suspend the chl %d (log: %d) status %x\n",
1096 d40c->phy_chan->num, d40c->log_num,
1097 status);
1098 dump_stack();
1099 ret = -EBUSY;
1100 }
1101
1102 }
1103 unlock:
1104 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1105 return ret;
1106 }
1107
1108 static void d40_term_all(struct d40_chan *d40c)
1109 {
1110 struct d40_desc *d40d;
1111 struct d40_desc *_d;
1112
1113
1114 while ((d40d = d40_first_done(d40c))) {
1115 d40_desc_remove(d40d);
1116 d40_desc_free(d40c, d40d);
1117 }
1118
1119
1120 while ((d40d = d40_first_active_get(d40c))) {
1121 d40_desc_remove(d40d);
1122 d40_desc_free(d40c, d40d);
1123 }
1124
1125
1126 while ((d40d = d40_first_queued(d40c))) {
1127 d40_desc_remove(d40d);
1128 d40_desc_free(d40c, d40d);
1129 }
1130
1131
1132 while ((d40d = d40_first_pending(d40c))) {
1133 d40_desc_remove(d40d);
1134 d40_desc_free(d40c, d40d);
1135 }
1136
1137
1138 if (!list_empty(&d40c->client))
1139 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1140 d40_desc_remove(d40d);
1141 d40_desc_free(d40c, d40d);
1142 }
1143
1144
1145 if (!list_empty(&d40c->prepare_queue))
1146 list_for_each_entry_safe(d40d, _d,
1147 &d40c->prepare_queue, node) {
1148 d40_desc_remove(d40d);
1149 d40_desc_free(d40c, d40d);
1150 }
1151
1152 d40c->pending_tx = 0;
1153 }
1154
1155 static void __d40_config_set_event(struct d40_chan *d40c,
1156 enum d40_events event_type, u32 event,
1157 int reg)
1158 {
1159 void __iomem *addr = chan_base(d40c) + reg;
1160 int tries;
1161 u32 status;
1162
1163 switch (event_type) {
1164
1165 case D40_DEACTIVATE_EVENTLINE:
1166
1167 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1168 | ~D40_EVENTLINE_MASK(event), addr);
1169 break;
1170
1171 case D40_SUSPEND_REQ_EVENTLINE:
1172 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1173 D40_EVENTLINE_POS(event);
1174
1175 if (status == D40_DEACTIVATE_EVENTLINE ||
1176 status == D40_SUSPEND_REQ_EVENTLINE)
1177 break;
1178
1179 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1180 | ~D40_EVENTLINE_MASK(event), addr);
1181
1182 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1183
1184 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1185 D40_EVENTLINE_POS(event);
1186
1187 cpu_relax();
1188
1189
1190
1191
1192 udelay(3);
1193
1194 if (status == D40_DEACTIVATE_EVENTLINE)
1195 break;
1196 }
1197
1198 if (tries == D40_SUSPEND_MAX_IT) {
1199 chan_err(d40c,
1200 "unable to stop the event_line chl %d (log: %d)"
1201 "status %x\n", d40c->phy_chan->num,
1202 d40c->log_num, status);
1203 }
1204 break;
1205
1206 case D40_ACTIVATE_EVENTLINE:
1207
1208
1209
1210
1211
1212 tries = 100;
1213 while (--tries) {
1214 writel((D40_ACTIVATE_EVENTLINE <<
1215 D40_EVENTLINE_POS(event)) |
1216 ~D40_EVENTLINE_MASK(event), addr);
1217
1218 if (readl(addr) & D40_EVENTLINE_MASK(event))
1219 break;
1220 }
1221 /* tries == 99 means the first write took effect, i.e. no retry was needed */
1222 if (tries != 99)
1223 dev_dbg(chan2dev(d40c),
1224 "[%s] workaround enable S%cLNK (%d tries)\n",
1225 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1226 100 - tries);
1227
1228 WARN_ON(!tries);
1229 break;
1230
1231 case D40_ROUND_EVENTLINE:
1232 BUG();
1233 break;
1234
1235 }
1236 }
1237
1238 static void d40_config_set_event(struct d40_chan *d40c,
1239 enum d40_events event_type)
1240 {
1241 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1242
1243
1244 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1245 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1246 __d40_config_set_event(d40c, event_type, event,
1247 D40_CHAN_REG_SSLNK);
1248
1249 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1250 __d40_config_set_event(d40c, event_type, event,
1251 D40_CHAN_REG_SDLNK);
1252 }
1253
1254 static u32 d40_chan_has_events(struct d40_chan *d40c)
1255 {
1256 void __iomem *chanbase = chan_base(d40c);
1257 u32 val;
1258
1259 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1260 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1261
1262 return val;
1263 }
1264
1265 static int
1266 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1267 {
1268 unsigned long flags;
1269 int ret = 0;
1270 u32 active_status;
1271 void __iomem *active_reg;
1272
1273 if (d40c->phy_chan->num % 2 == 0)
1274 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1275 else
1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1277
1278
1279 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1280
1281 switch (command) {
1282 case D40_DMA_STOP:
1283 case D40_DMA_SUSPEND_REQ:
1284
1285 active_status = (readl(active_reg) &
1286 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1287 D40_CHAN_POS(d40c->phy_chan->num);
1288
1289 if (active_status == D40_DMA_RUN)
1290 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1291 else
1292 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1293
1294 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1295 ret = __d40_execute_command_phy(d40c, command);
1296
1297 break;
1298
1299 case D40_DMA_RUN:
1300
1301 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1302 ret = __d40_execute_command_phy(d40c, command);
1303 break;
1304
1305 case D40_DMA_SUSPENDED:
1306 BUG();
1307 break;
1308 }
1309
1310 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1311 return ret;
1312 }
1313
1314 static int d40_channel_execute_command(struct d40_chan *d40c,
1315 enum d40_command command)
1316 {
1317 if (chan_is_logical(d40c))
1318 return __d40_execute_command_log(d40c, command);
1319 else
1320 return __d40_execute_command_phy(d40c, command);
1321 }
1322
1323 static u32 d40_get_prmo(struct d40_chan *d40c)
1324 {
1325 static const unsigned int phy_map[] = {
1326 [STEDMA40_PCHAN_BASIC_MODE]
1327 = D40_DREG_PRMO_PCHAN_BASIC,
1328 [STEDMA40_PCHAN_MODULO_MODE]
1329 = D40_DREG_PRMO_PCHAN_MODULO,
1330 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1331 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1332 };
1333 static const unsigned int log_map[] = {
1334 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1335 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1336 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1337 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1338 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1339 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1340 };
1341
1342 if (chan_is_physical(d40c))
1343 return phy_map[d40c->dma_cfg.mode_opt];
1344 else
1345 return log_map[d40c->dma_cfg.mode_opt];
1346 }
1347
1348 static void d40_config_write(struct d40_chan *d40c)
1349 {
1350 u32 addr_base;
1351 u32 var;
1352
1353
1354 addr_base = (d40c->phy_chan->num % 2) * 4;
1355
1356 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1357 D40_CHAN_POS(d40c->phy_chan->num);
1358 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1359
1360
1361 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1362
1363 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1364
1365 if (chan_is_logical(d40c)) {
1366 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1367 & D40_SREG_ELEM_LOG_LIDX_MASK;
1368 void __iomem *chanbase = chan_base(d40c);
1369
1370
1371 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1372 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1373
1374
1375 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1376 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1377
1378
1379 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1380 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1381 }
1382 }
1383
1384 static u32 d40_residue(struct d40_chan *d40c)
1385 {
1386 u32 num_elt;
1387
1388 if (chan_is_logical(d40c))
1389 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1390 >> D40_MEM_LCSP2_ECNT_POS;
1391 else {
1392 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1393 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1394 >> D40_SREG_ELEM_PHY_ECNT_POS;
1395 }
1396
1397 return num_elt * d40c->dma_cfg.dst_info.data_width;
1398 }
1399
1400 static bool d40_tx_is_linked(struct d40_chan *d40c)
1401 {
1402 bool is_link;
1403
1404 if (chan_is_logical(d40c))
1405 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1406 else
1407 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1408 & D40_SREG_LNK_PHYS_LNK_MASK;
1409
1410 return is_link;
1411 }
1412
1413 static int d40_pause(struct dma_chan *chan)
1414 {
1415 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1416 int res = 0;
1417 unsigned long flags;
1418
1419 if (d40c->phy_chan == NULL) {
1420 chan_err(d40c, "Channel is not allocated!\n");
1421 return -EINVAL;
1422 }
1423
1424 if (!d40c->busy)
1425 return 0;
1426
1427 spin_lock_irqsave(&d40c->lock, flags);
1428 pm_runtime_get_sync(d40c->base->dev);
1429
1430 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1431
1432 pm_runtime_mark_last_busy(d40c->base->dev);
1433 pm_runtime_put_autosuspend(d40c->base->dev);
1434 spin_unlock_irqrestore(&d40c->lock, flags);
1435 return res;
1436 }
1437
1438 static int d40_resume(struct dma_chan *chan)
1439 {
1440 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1441 int res = 0;
1442 unsigned long flags;
1443
1444 if (d40c->phy_chan == NULL) {
1445 chan_err(d40c, "Channel is not allocated!\n");
1446 return -EINVAL;
1447 }
1448
1449 if (!d40c->busy)
1450 return 0;
1451
1452 spin_lock_irqsave(&d40c->lock, flags);
1453 pm_runtime_get_sync(d40c->base->dev);
1454
1455
1456 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1457 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1458
1459 pm_runtime_mark_last_busy(d40c->base->dev);
1460 pm_runtime_put_autosuspend(d40c->base->dev);
1461 spin_unlock_irqrestore(&d40c->lock, flags);
1462 return res;
1463 }
1464
1465 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1466 {
1467 struct d40_chan *d40c = container_of(tx->chan,
1468 struct d40_chan,
1469 chan);
1470 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1471 unsigned long flags;
1472 dma_cookie_t cookie;
1473
1474 spin_lock_irqsave(&d40c->lock, flags);
1475 cookie = dma_cookie_assign(tx);
1476 d40_desc_queue(d40c, d40d);
1477 spin_unlock_irqrestore(&d40c->lock, flags);
1478
1479 return cookie;
1480 }
1481
1482 static int d40_start(struct d40_chan *d40c)
1483 {
1484 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1485 }
1486
1487 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1488 {
1489 struct d40_desc *d40d;
1490 int err;
1491
1492
1493 d40d = d40_first_queued(d40c);
1494
1495 if (d40d != NULL) {
1496 if (!d40c->busy) {
1497 d40c->busy = true;
1498 pm_runtime_get_sync(d40c->base->dev);
1499 }
1500
1501
1502 d40_desc_remove(d40d);
1503
1504
1505 d40_desc_submit(d40c, d40d);
1506
1507
1508 d40_desc_load(d40c, d40d);
1509
1510
1511 err = d40_start(d40c);
1512
1513 if (err)
1514 return NULL;
1515 }
1516
1517 return d40d;
1518 }
1519
1520
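/*
 * Called from the interrupt handler, with the channel lock held, when a
 * channel signals terminal count: reload the next part of a long or cyclic
 * job if needed, otherwise move the descriptor to the done list and kick
 * the tasklet to run the client callback.
 */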
1521 static void dma_tc_handle(struct d40_chan *d40c)
1522 {
1523 struct d40_desc *d40d;
1524
1525
1526 d40d = d40_first_active_get(d40c);
1527
1528 if (d40d == NULL)
1529 return;
1530
1531 if (d40d->cyclic) {
1532
1533
1534
1535
1536
1537
1538 if (d40d->lli_current < d40d->lli_len
1539 && !d40_tx_is_linked(d40c)
1540 && !d40_residue(d40c)) {
1541 d40_lcla_free_all(d40c, d40d);
1542 d40_desc_load(d40c, d40d);
1543 (void) d40_start(d40c);
1544
1545 if (d40d->lli_current == d40d->lli_len)
1546 d40d->lli_current = 0;
1547 }
1548 } else {
1549 d40_lcla_free_all(d40c, d40d);
1550
1551 if (d40d->lli_current < d40d->lli_len) {
1552 d40_desc_load(d40c, d40d);
1553
1554 (void) d40_start(d40c);
1555 return;
1556 }
1557
1558 if (d40_queue_start(d40c) == NULL) {
1559 d40c->busy = false;
1560
1561 pm_runtime_mark_last_busy(d40c->base->dev);
1562 pm_runtime_put_autosuspend(d40c->base->dev);
1563 }
1564
1565 d40_desc_remove(d40d);
1566 d40_desc_done(d40c, d40d);
1567 }
1568
1569 d40c->pending_tx++;
1570 tasklet_schedule(&d40c->tasklet);
1571
1572 }
1573
1574 static void dma_tasklet(unsigned long data)
1575 {
1576 struct d40_chan *d40c = (struct d40_chan *) data;
1577 struct d40_desc *d40d;
1578 unsigned long flags;
1579 bool callback_active;
1580 struct dmaengine_desc_callback cb;
1581
1582 spin_lock_irqsave(&d40c->lock, flags);
1583
1584
1585 d40d = d40_first_done(d40c);
1586 if (d40d == NULL) {
1587
1588 d40d = d40_first_active_get(d40c);
1589 if (d40d == NULL || !d40d->cyclic)
1590 goto check_pending_tx;
1591 }
1592
1593 if (!d40d->cyclic)
1594 dma_cookie_complete(&d40d->txd);
1595
1596
1597
1598
1599
1600 if (d40c->pending_tx == 0) {
1601 spin_unlock_irqrestore(&d40c->lock, flags);
1602 return;
1603 }
1604
1605
1606 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1607 dmaengine_desc_get_callback(&d40d->txd, &cb);
1608
1609 if (!d40d->cyclic) {
1610 if (async_tx_test_ack(&d40d->txd)) {
1611 d40_desc_remove(d40d);
1612 d40_desc_free(d40c, d40d);
1613 } else if (!d40d->is_in_client_list) {
1614 d40_desc_remove(d40d);
1615 d40_lcla_free_all(d40c, d40d);
1616 list_add_tail(&d40d->node, &d40c->client);
1617 d40d->is_in_client_list = true;
1618 }
1619 }
1620
1621 d40c->pending_tx--;
1622
1623 if (d40c->pending_tx)
1624 tasklet_schedule(&d40c->tasklet);
1625
1626 spin_unlock_irqrestore(&d40c->lock, flags);
1627
1628 if (callback_active)
1629 dmaengine_desc_callback_invoke(&cb, NULL);
1630
1631 return;
1632 check_pending_tx:
1633
1634 if (d40c->pending_tx > 0)
1635 d40c->pending_tx--;
1636 spin_unlock_irqrestore(&d40c->lock, flags);
1637 }
1638
1639 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1640 {
1641 int i;
1642 u32 idx;
1643 u32 row;
1644 long chan = -1;
1645 struct d40_chan *d40c;
1646 unsigned long flags;
1647 struct d40_base *base = data;
1648 u32 *regs = base->regs_interrupt;
1649 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1650 u32 il_size = base->gen_dmac.il_size;
1651
1652 spin_lock_irqsave(&base->interrupt_lock, flags);
1653
1654
1655 for (i = 0; i < il_size; i++)
1656 regs[i] = readl(base->virtbase + il[i].src);
1657
1658 for (;;) {
1659
1660 chan = find_next_bit((unsigned long *)regs,
1661 BITS_PER_LONG * il_size, chan + 1);
1662
1663
1664 if (chan == BITS_PER_LONG * il_size)
1665 break;
1666
1667 row = chan / BITS_PER_LONG;
1668 idx = chan & (BITS_PER_LONG - 1);
1669
1670 if (il[row].offset == D40_PHY_CHAN)
1671 d40c = base->lookup_phy_chans[idx];
1672 else
1673 d40c = base->lookup_log_chans[il[row].offset + idx];
1674
1675 if (!d40c) {
1676
1677
1678
1679
1680 continue;
1681 }
1682
1683
1684 writel(BIT(idx), base->virtbase + il[row].clr);
1685
1686 spin_lock(&d40c->lock);
1687
1688 if (!il[row].is_error)
1689 dma_tc_handle(d40c);
1690 else
1691 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1692 chan, il[row].offset, idx);
1693
1694 spin_unlock(&d40c->lock);
1695 }
1696
1697 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1698
1699 return IRQ_HANDLED;
1700 }
1701
1702 static int d40_validate_conf(struct d40_chan *d40c,
1703 struct stedma40_chan_cfg *conf)
1704 {
1705 int res = 0;
1706 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1707
1708 if (!conf->dir) {
1709 chan_err(d40c, "Invalid direction.\n");
1710 res = -EINVAL;
1711 }
1712
1713 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1714 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1715 (conf->dev_type < 0)) {
1716 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1717 res = -EINVAL;
1718 }
1719
1720 if (conf->dir == DMA_DEV_TO_DEV) {
1721
1722
1723
1724
1725 chan_err(d40c, "periph to periph not supported\n");
1726 res = -EINVAL;
1727 }
1728
1729 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1730 conf->src_info.data_width !=
1731 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1732 conf->dst_info.data_width) {
1733
1734
1735
1736
1737
1738 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1739 res = -EINVAL;
1740 }
1741
1742 return res;
1743 }
1744
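/*
 * allocated_src/allocated_dst in d40_phy_res act as allocation masks:
 * D40_ALLOC_FREE means the half channel is unused, D40_ALLOC_PHY means it is
 * taken by a physical channel, and otherwise individual bits record which
 * logical event lines are mapped onto it.
 */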
1745 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1746 bool is_src, int log_event_line, bool is_log,
1747 bool *first_user)
1748 {
1749 unsigned long flags;
1750 spin_lock_irqsave(&phy->lock, flags);
1751
1752 *first_user = ((phy->allocated_src | phy->allocated_dst)
1753 == D40_ALLOC_FREE);
1754
1755 if (!is_log) {
1756
1757 if (phy->allocated_src == D40_ALLOC_FREE &&
1758 phy->allocated_dst == D40_ALLOC_FREE) {
1759 phy->allocated_dst = D40_ALLOC_PHY;
1760 phy->allocated_src = D40_ALLOC_PHY;
1761 goto found_unlock;
1762 } else
1763 goto not_found_unlock;
1764 }
1765
1766
1767 if (is_src) {
1768 if (phy->allocated_src == D40_ALLOC_PHY)
1769 goto not_found_unlock;
1770
1771 if (phy->allocated_src == D40_ALLOC_FREE)
1772 phy->allocated_src = D40_ALLOC_LOG_FREE;
1773
1774 if (!(phy->allocated_src & BIT(log_event_line))) {
1775 phy->allocated_src |= BIT(log_event_line);
1776 goto found_unlock;
1777 } else
1778 goto not_found_unlock;
1779 } else {
1780 if (phy->allocated_dst == D40_ALLOC_PHY)
1781 goto not_found_unlock;
1782
1783 if (phy->allocated_dst == D40_ALLOC_FREE)
1784 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1785
1786 if (!(phy->allocated_dst & BIT(log_event_line))) {
1787 phy->allocated_dst |= BIT(log_event_line);
1788 goto found_unlock;
1789 }
1790 }
1791 not_found_unlock:
1792 spin_unlock_irqrestore(&phy->lock, flags);
1793 return false;
1794 found_unlock:
1795 spin_unlock_irqrestore(&phy->lock, flags);
1796 return true;
1797 }
1798
1799 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1800 int log_event_line)
1801 {
1802 unsigned long flags;
1803 bool is_free = false;
1804
1805 spin_lock_irqsave(&phy->lock, flags);
1806 if (!log_event_line) {
1807 phy->allocated_dst = D40_ALLOC_FREE;
1808 phy->allocated_src = D40_ALLOC_FREE;
1809 is_free = true;
1810 goto unlock;
1811 }
1812
1813
1814 if (is_src) {
1815 phy->allocated_src &= ~BIT(log_event_line);
1816 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1817 phy->allocated_src = D40_ALLOC_FREE;
1818 } else {
1819 phy->allocated_dst &= ~BIT(log_event_line);
1820 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1821 phy->allocated_dst = D40_ALLOC_FREE;
1822 }
1823
1824 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1825 D40_ALLOC_FREE);
1826 unlock:
1827 spin_unlock_irqrestore(&phy->lock, flags);
1828
1829 return is_free;
1830 }
1831
1832 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1833 {
1834 int dev_type = d40c->dma_cfg.dev_type;
1835 int event_group;
1836 int event_line;
1837 struct d40_phy_res *phys;
1838 int i;
1839 int j;
1840 int log_num;
1841 int num_phy_chans;
1842 bool is_src;
1843 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1844
1845 phys = d40c->base->phy_res;
1846 num_phy_chans = d40c->base->num_phy_chans;
1847
1848 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1849 log_num = 2 * dev_type;
1850 is_src = true;
1851 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1852 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1853
1854 log_num = 2 * dev_type + 1;
1855 is_src = false;
1856 } else
1857 return -EINVAL;
1858
1859 event_group = D40_TYPE_TO_GROUP(dev_type);
1860 event_line = D40_TYPE_TO_EVENT(dev_type);
1861
1862 if (!is_log) {
1863 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1864
1865 if (d40c->dma_cfg.use_fixed_channel) {
1866 i = d40c->dma_cfg.phy_channel;
1867 if (d40_alloc_mask_set(&phys[i], is_src,
1868 0, is_log,
1869 first_phy_user))
1870 goto found_phy;
1871 } else {
1872 for (i = 0; i < num_phy_chans; i++) {
1873 if (d40_alloc_mask_set(&phys[i], is_src,
1874 0, is_log,
1875 first_phy_user))
1876 goto found_phy;
1877 }
1878 }
1879 } else
1880 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1881 int phy_num = j + event_group * 2;
1882 for (i = phy_num; i < phy_num + 2; i++) {
1883 if (d40_alloc_mask_set(&phys[i],
1884 is_src,
1885 0,
1886 is_log,
1887 first_phy_user))
1888 goto found_phy;
1889 }
1890 }
1891 return -EINVAL;
1892 found_phy:
1893 d40c->phy_chan = &phys[i];
1894 d40c->log_num = D40_PHY_CHAN;
1895 goto out;
1896 }
1897 if (dev_type == -1)
1898 return -EINVAL;
1899
1900
1901 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1902 int phy_num = j + event_group * 2;
1903
1904 if (d40c->dma_cfg.use_fixed_channel) {
1905 i = d40c->dma_cfg.phy_channel;
1906
1907 if ((i != phy_num) && (i != phy_num + 1)) {
1908 dev_err(chan2dev(d40c),
1909 "invalid fixed phy channel %d\n", i);
1910 return -EINVAL;
1911 }
1912
1913 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1914 is_log, first_phy_user))
1915 goto found_log;
1916
1917 dev_err(chan2dev(d40c),
1918 "could not allocate fixed phy channel %d\n", i);
1919 return -EINVAL;
1920 }
1921
1922
1923
1924
1925
1926
1927 if (is_src) {
1928 for (i = phy_num; i < phy_num + 2; i++) {
1929 if (d40_alloc_mask_set(&phys[i], is_src,
1930 event_line, is_log,
1931 first_phy_user))
1932 goto found_log;
1933 }
1934 } else {
1935 for (i = phy_num + 1; i >= phy_num; i--) {
1936 if (d40_alloc_mask_set(&phys[i], is_src,
1937 event_line, is_log,
1938 first_phy_user))
1939 goto found_log;
1940 }
1941 }
1942 }
1943 return -EINVAL;
1944
1945 found_log:
1946 d40c->phy_chan = &phys[i];
1947 d40c->log_num = log_num;
1948 out:
1949
1950 if (is_log)
1951 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1952 else
1953 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1954
1955 return 0;
1956
1957 }
1958
1959 static int d40_config_memcpy(struct d40_chan *d40c)
1960 {
1961 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1962
1963 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1964 d40c->dma_cfg = dma40_memcpy_conf_log;
1965 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1966
1967 d40_log_cfg(&d40c->dma_cfg,
1968 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1969
1970 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1971 dma_has_cap(DMA_SLAVE, cap)) {
1972 d40c->dma_cfg = dma40_memcpy_conf_phy;
1973
1974
1975 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1976
1977
1978 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1979 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1980
1981 } else {
1982 chan_err(d40c, "No memcpy\n");
1983 return -EINVAL;
1984 }
1985
1986 return 0;
1987 }
1988
1989 static int d40_free_dma(struct d40_chan *d40c)
1990 {
1991
1992 int res = 0;
1993 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1994 struct d40_phy_res *phy = d40c->phy_chan;
1995 bool is_src;
1996
1997
1998 d40_term_all(d40c);
1999
2000 if (phy == NULL) {
2001 chan_err(d40c, "phy == null\n");
2002 return -EINVAL;
2003 }
2004
2005 if (phy->allocated_src == D40_ALLOC_FREE &&
2006 phy->allocated_dst == D40_ALLOC_FREE) {
2007 chan_err(d40c, "channel already free\n");
2008 return -EINVAL;
2009 }
2010
2011 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2012 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2013 is_src = false;
2014 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2015 is_src = true;
2016 else {
2017 chan_err(d40c, "Unknown direction\n");
2018 return -EINVAL;
2019 }
2020
2021 pm_runtime_get_sync(d40c->base->dev);
2022 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2023 if (res) {
2024 chan_err(d40c, "stop failed\n");
2025 goto mark_last_busy;
2026 }
2027
2028 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2029
2030 if (chan_is_logical(d40c))
2031 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2032 else
2033 d40c->base->lookup_phy_chans[phy->num] = NULL;
2034
2035 if (d40c->busy) {
2036 pm_runtime_mark_last_busy(d40c->base->dev);
2037 pm_runtime_put_autosuspend(d40c->base->dev);
2038 }
2039
2040 d40c->busy = false;
2041 d40c->phy_chan = NULL;
2042 d40c->configured = false;
2043 mark_last_busy:
2044 pm_runtime_mark_last_busy(d40c->base->dev);
2045 pm_runtime_put_autosuspend(d40c->base->dev);
2046 return res;
2047 }
2048
2049 static bool d40_is_paused(struct d40_chan *d40c)
2050 {
2051 void __iomem *chanbase = chan_base(d40c);
2052 bool is_paused = false;
2053 unsigned long flags;
2054 void __iomem *active_reg;
2055 u32 status;
2056 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2057
2058 spin_lock_irqsave(&d40c->lock, flags);
2059
2060 if (chan_is_physical(d40c)) {
2061 if (d40c->phy_chan->num % 2 == 0)
2062 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2063 else
2064 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2065
2066 status = (readl(active_reg) &
2067 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2068 D40_CHAN_POS(d40c->phy_chan->num);
2069 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2070 is_paused = true;
2071 goto unlock;
2072 }
2073
2074 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2075 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2076 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2077 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2078 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2079 } else {
2080 chan_err(d40c, "Unknown direction\n");
2081 goto unlock;
2082 }
2083
2084 status = (status & D40_EVENTLINE_MASK(event)) >>
2085 D40_EVENTLINE_POS(event);
2086
2087 if (status != D40_DMA_RUN)
2088 is_paused = true;
2089 unlock:
2090 spin_unlock_irqrestore(&d40c->lock, flags);
2091 return is_paused;
2092
2093 }
2094
2095 static u32 stedma40_residue(struct dma_chan *chan)
2096 {
2097 struct d40_chan *d40c =
2098 container_of(chan, struct d40_chan, chan);
2099 u32 bytes_left;
2100 unsigned long flags;
2101
2102 spin_lock_irqsave(&d40c->lock, flags);
2103 bytes_left = d40_residue(d40c);
2104 spin_unlock_irqrestore(&d40c->lock, flags);
2105
2106 return bytes_left;
2107 }
2108
2109 static int
2110 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2111 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2112 unsigned int sg_len, dma_addr_t src_dev_addr,
2113 dma_addr_t dst_dev_addr)
2114 {
2115 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2116 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2117 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2118 int ret;
2119
2120 ret = d40_log_sg_to_lli(sg_src, sg_len,
2121 src_dev_addr,
2122 desc->lli_log.src,
2123 chan->log_def.lcsp1,
2124 src_info->data_width,
2125 dst_info->data_width);
2126
2127 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2128 dst_dev_addr,
2129 desc->lli_log.dst,
2130 chan->log_def.lcsp3,
2131 dst_info->data_width,
2132 src_info->data_width);
2133
2134 return ret < 0 ? ret : 0;
2135 }
2136
2137 static int
2138 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2139 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2140 unsigned int sg_len, dma_addr_t src_dev_addr,
2141 dma_addr_t dst_dev_addr)
2142 {
2143 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2144 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2145 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2146 unsigned long flags = 0;
2147 int ret;
2148
2149 if (desc->cyclic)
2150 flags |= LLI_CYCLIC | LLI_TERM_INT;
2151
2152 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2153 desc->lli_phy.src,
2154 virt_to_phys(desc->lli_phy.src),
2155 chan->src_def_cfg,
2156 src_info, dst_info, flags);
2157
2158 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2159 desc->lli_phy.dst,
2160 virt_to_phys(desc->lli_phy.dst),
2161 chan->dst_def_cfg,
2162 dst_info, src_info, flags);
2163
2164 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2165 desc->lli_pool.size, DMA_TO_DEVICE);
2166
2167 return ret < 0 ? ret : 0;
2168 }
2169
2170 static struct d40_desc *
2171 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2172 unsigned int sg_len, unsigned long dma_flags)
2173 {
2174 struct stedma40_chan_cfg *cfg;
2175 struct d40_desc *desc;
2176 int ret;
2177
2178 desc = d40_desc_get(chan);
2179 if (!desc)
2180 return NULL;
2181
2182 cfg = &chan->dma_cfg;
2183 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2184 cfg->dst_info.data_width);
2185 if (desc->lli_len < 0) {
2186 chan_err(chan, "Unaligned size\n");
2187 goto free_desc;
2188 }
2189
2190 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2191 if (ret < 0) {
2192 chan_err(chan, "Could not allocate lli\n");
2193 goto free_desc;
2194 }
2195
2196 desc->lli_current = 0;
2197 desc->txd.flags = dma_flags;
2198 desc->txd.tx_submit = d40_tx_submit;
2199
2200 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2201
2202 return desc;
2203 free_desc:
2204 d40_desc_free(chan, desc);
2205 return NULL;
2206 }
2207
2208 static struct dma_async_tx_descriptor *
2209 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2210 struct scatterlist *sg_dst, unsigned int sg_len,
2211 enum dma_transfer_direction direction, unsigned long dma_flags)
2212 {
2213 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2214 dma_addr_t src_dev_addr;
2215 dma_addr_t dst_dev_addr;
2216 struct d40_desc *desc;
2217 unsigned long flags;
2218 int ret;
2219
2220 if (!chan->phy_chan) {
2221 chan_err(chan, "Cannot prepare unallocated channel\n");
2222 return NULL;
2223 }
2224
2225 d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2226
2227 spin_lock_irqsave(&chan->lock, flags);
2228
2229 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2230 if (desc == NULL)
2231 goto unlock;
2232
2233 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2234 desc->cyclic = true;
2235
2236 src_dev_addr = 0;
2237 dst_dev_addr = 0;
2238 if (direction == DMA_DEV_TO_MEM)
2239 src_dev_addr = chan->runtime_addr;
2240 else if (direction == DMA_MEM_TO_DEV)
2241 dst_dev_addr = chan->runtime_addr;
2242
2243 if (chan_is_logical(chan))
2244 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2245 sg_len, src_dev_addr, dst_dev_addr);
2246 else
2247 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2248 sg_len, src_dev_addr, dst_dev_addr);
2249
2250 if (ret) {
2251 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2252 chan_is_logical(chan) ? "log" : "phy", ret);
2253 goto free_desc;
2254 }
2255
2256
2257
2258
2259
2260 list_add_tail(&desc->node, &chan->prepare_queue);
2261
2262 spin_unlock_irqrestore(&chan->lock, flags);
2263
2264 return &desc->txd;
2265 free_desc:
2266 d40_desc_free(chan, desc);
2267 unlock:
2268 spin_unlock_irqrestore(&chan->lock, flags);
2269 return NULL;
2270 }
2271
2272 bool stedma40_filter(struct dma_chan *chan, void *data)
2273 {
2274 struct stedma40_chan_cfg *info = data;
2275 struct d40_chan *d40c =
2276 container_of(chan, struct d40_chan, chan);
2277 int err;
2278
2279 if (data) {
2280 err = d40_validate_conf(d40c, info);
2281 if (!err)
2282 d40c->dma_cfg = *info;
2283 } else
2284 err = d40_config_memcpy(d40c);
2285
2286 if (!err)
2287 d40c->configured = true;
2288
2289 return err == 0;
2290 }
2291 EXPORT_SYMBOL(stedma40_filter);
2292
2293 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2294 {
2295 bool realtime = d40c->dma_cfg.realtime;
2296 bool highprio = d40c->dma_cfg.high_priority;
2297 u32 rtreg;
2298 u32 event = D40_TYPE_TO_EVENT(dev_type);
2299 u32 group = D40_TYPE_TO_GROUP(dev_type);
2300 u32 bit = BIT(event);
2301 u32 prioreg;
2302 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2303
2304 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2305
2306
2307
2308
2309
2310
2311
2312
2313 if (!src && chan_is_logical(d40c))
2314 highprio = false;
2315
2316 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2317
2318
2319 if (!src)
2320 bit <<= 16;
2321
2322 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2323 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2324 }
2325
2326 static void d40_set_prio_realtime(struct d40_chan *d40c)
2327 {
2328 if (d40c->base->rev < 3)
2329 return;
2330
2331 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2332 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2333 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2334
2335 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2336 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2337 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2338 }
2339
2340 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2341 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2342 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2343 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2344 #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
2345
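/*
 * Translate a DT dma-cell specifier into a channel. The three cells are the
 * device/event type, the (optionally fixed) physical channel, and a flags
 * word decoded by the D40_DT_FLAGS_* macros above. As an illustrative
 * (not binding-authoritative) example, a client using event line 29 for a
 * device-to-memory transfer on a logical channel could reference:
 *
 *	dmas = <&dma 29 0 0x2>;
 *
 * where bit 1 of the last cell selects DMA_DEV_TO_MEM.
 */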
2346 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2347 struct of_dma *ofdma)
2348 {
2349 struct stedma40_chan_cfg cfg;
2350 dma_cap_mask_t cap;
2351 u32 flags;
2352
2353 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2354
2355 dma_cap_zero(cap);
2356 dma_cap_set(DMA_SLAVE, cap);
2357
2358 cfg.dev_type = dma_spec->args[0];
2359 flags = dma_spec->args[2];
2360
2361 switch (D40_DT_FLAGS_MODE(flags)) {
2362 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2363 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2364 }
2365
2366 switch (D40_DT_FLAGS_DIR(flags)) {
2367 case 0:
2368 cfg.dir = DMA_MEM_TO_DEV;
2369 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2370 break;
2371 case 1:
2372 cfg.dir = DMA_DEV_TO_MEM;
2373 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2374 break;
2375 }
2376
2377 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2378 cfg.phy_channel = dma_spec->args[1];
2379 cfg.use_fixed_channel = true;
2380 }
2381
2382 if (D40_DT_FLAGS_HIGH_PRIO(flags))
2383 cfg.high_priority = true;
2384
2385 return dma_request_channel(cap, stedma40_filter, &cfg);
2386 }
2387
2388
2389 static int d40_alloc_chan_resources(struct dma_chan *chan)
2390 {
2391 int err;
2392 unsigned long flags;
2393 struct d40_chan *d40c =
2394 container_of(chan, struct d40_chan, chan);
2395 bool is_free_phy;
2396 spin_lock_irqsave(&d40c->lock, flags);
2397
2398 dma_cookie_init(chan);
2399
2400 /* If no configuration was given, fall back to the memcpy defaults. */
2401 if (!d40c->configured) {
2402 err = d40_config_memcpy(d40c);
2403 if (err) {
2404 chan_err(d40c, "Failed to configure memcpy channel\n");
2405 goto mark_last_busy;
2406 }
2407 }
2408
2409 err = d40_allocate_channel(d40c, &is_free_phy);
2410 if (err) {
2411 chan_err(d40c, "Failed to allocate channel\n");
2412 d40c->configured = false;
2413 goto mark_last_busy;
2414 }
2415
2416 pm_runtime_get_sync(d40c->base->dev);
2417
2418 d40_set_prio_realtime(d40c);
2419
2420 if (chan_is_logical(d40c)) {
2421 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2422 d40c->lcpa = d40c->base->lcpa_base +
2423 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2424 else
2425 d40c->lcpa = d40c->base->lcpa_base +
2426 d40c->dma_cfg.dev_type *
2427 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2428
2429 /* Unmask the global interrupt mask for the logical channel. */
2430 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2431 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2432 }
2433
2434 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2435 chan_is_logical(d40c) ? "logical" : "physical",
2436 d40c->phy_chan->num,
2437 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2438
2439
2440 /*
2441  * Only write the channel configuration when the physical channel was
2442  * newly taken; a physical channel that is shared by several logical
2443  * channels has already been set up by its first user.
2444  */
2445 if (is_free_phy)
2446 d40_config_write(d40c);
2447 mark_last_busy:
2448 pm_runtime_mark_last_busy(d40c->base->dev);
2449 pm_runtime_put_autosuspend(d40c->base->dev);
2450 spin_unlock_irqrestore(&d40c->lock, flags);
2451 return err;
2452 }
2453
2454 static void d40_free_chan_resources(struct dma_chan *chan)
2455 {
2456 struct d40_chan *d40c =
2457 container_of(chan, struct d40_chan, chan);
2458 int err;
2459 unsigned long flags;
2460
2461 if (d40c->phy_chan == NULL) {
2462 chan_err(d40c, "Cannot free unallocated channel\n");
2463 return;
2464 }
2465
2466 spin_lock_irqsave(&d40c->lock, flags);
2467
2468 err = d40_free_dma(d40c);
2469
2470 if (err)
2471 chan_err(d40c, "Failed to free channel\n");
2472 spin_unlock_irqrestore(&d40c->lock, flags);
2473 }
2474
2475 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2476 dma_addr_t dst,
2477 dma_addr_t src,
2478 size_t size,
2479 unsigned long dma_flags)
2480 {
2481 struct scatterlist dst_sg;
2482 struct scatterlist src_sg;
2483
2484 sg_init_table(&dst_sg, 1);
2485 sg_init_table(&src_sg, 1);
2486
2487 sg_dma_address(&dst_sg) = dst;
2488 sg_dma_address(&src_sg) = src;
2489
2490 sg_dma_len(&dst_sg) = size;
2491 sg_dma_len(&src_sg) = size;
2492
2493 return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2494 DMA_MEM_TO_MEM, dma_flags);
2495 }
2496
2497 static struct dma_async_tx_descriptor *
2498 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2499 unsigned int sg_len, enum dma_transfer_direction direction,
2500 unsigned long dma_flags, void *context)
2501 {
2502 if (!is_slave_direction(direction))
2503 return NULL;
2504
2505 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2506 }
2507
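/*
 * Cyclic transfers are prepared by building a temporary scatterlist with one
 * entry per period and chaining it onto itself; d40_prep_sg() then detects
 * the loop (sg_next() of the last entry points back at the first) and marks
 * the descriptor as cyclic.
 */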
2508 static struct dma_async_tx_descriptor *
2509 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2510 size_t buf_len, size_t period_len,
2511 enum dma_transfer_direction direction, unsigned long flags)
2512 {
2513 unsigned int periods = buf_len / period_len;
2514 struct dma_async_tx_descriptor *txd;
2515 struct scatterlist *sg;
2516 int i;
2517
2518 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2519 if (!sg)
2520 return NULL;
2521
2522 for (i = 0; i < periods; i++) {
2523 sg_dma_address(&sg[i]) = dma_addr;
2524 sg_dma_len(&sg[i]) = period_len;
2525 dma_addr += period_len;
2526 }
2527
2528 sg_chain(sg, periods + 1, sg);
2529
2530 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2531 DMA_PREP_INTERRUPT);
2532
2533 kfree(sg);
2534
2535 return txd;
2536 }
2537
2538 static enum dma_status d40_tx_status(struct dma_chan *chan,
2539 dma_cookie_t cookie,
2540 struct dma_tx_state *txstate)
2541 {
2542 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2543 enum dma_status ret;
2544
2545 if (d40c->phy_chan == NULL) {
2546 chan_err(d40c, "Cannot read status of unallocated channel\n");
2547 return -EINVAL;
2548 }
2549
2550 ret = dma_cookie_status(chan, cookie, txstate);
2551 if (ret != DMA_COMPLETE && txstate)
2552 dma_set_residue(txstate, stedma40_residue(chan));
2553
2554 if (d40_is_paused(d40c))
2555 ret = DMA_PAUSED;
2556
2557 return ret;
2558 }
2559
2560 static void d40_issue_pending(struct dma_chan *chan)
2561 {
2562 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2563 unsigned long flags;
2564
2565 if (d40c->phy_chan == NULL) {
2566 chan_err(d40c, "Channel is not allocated!\n");
2567 return;
2568 }
2569
2570 spin_lock_irqsave(&d40c->lock, flags);
2571
2572 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2573
2574 /* Busy means that queued jobs are already being processed. */
2575 if (!d40c->busy)
2576 (void) d40_queue_start(d40c);
2577
2578 spin_unlock_irqrestore(&d40c->lock, flags);
2579 }
2580
2581 static int d40_terminate_all(struct dma_chan *chan)
2582 {
2583 unsigned long flags;
2584 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2585 int ret;
2586
2587 if (d40c->phy_chan == NULL) {
2588 chan_err(d40c, "Channel is not allocated!\n");
2589 return -EINVAL;
2590 }
2591
2592 spin_lock_irqsave(&d40c->lock, flags);
2593
2594 pm_runtime_get_sync(d40c->base->dev);
2595 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2596 if (ret)
2597 chan_err(d40c, "Failed to stop channel\n");
2598
2599 d40_term_all(d40c);
2600 pm_runtime_mark_last_busy(d40c->base->dev);
2601 pm_runtime_put_autosuspend(d40c->base->dev);
2602 if (d40c->busy) {
2603 pm_runtime_mark_last_busy(d40c->base->dev);
2604 pm_runtime_put_autosuspend(d40c->base->dev);
2605 }
2606 d40c->busy = false;
2607
2608 spin_unlock_irqrestore(&d40c->lock, flags);
2609 return 0;
2610 }
2611
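/*
 * Map the requested maxburst onto the closest supported packet size (psize)
 * for one half (source or destination) of the channel configuration.
 */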
2612 static int
2613 dma40_config_to_halfchannel(struct d40_chan *d40c,
2614 struct stedma40_half_channel_info *info,
2615 u32 maxburst)
2616 {
2617 int psize;
2618
2619 if (chan_is_logical(d40c)) {
2620 if (maxburst >= 16)
2621 psize = STEDMA40_PSIZE_LOG_16;
2622 else if (maxburst >= 8)
2623 psize = STEDMA40_PSIZE_LOG_8;
2624 else if (maxburst >= 4)
2625 psize = STEDMA40_PSIZE_LOG_4;
2626 else
2627 psize = STEDMA40_PSIZE_LOG_1;
2628 } else {
2629 if (maxburst >= 16)
2630 psize = STEDMA40_PSIZE_PHY_16;
2631 else if (maxburst >= 8)
2632 psize = STEDMA40_PSIZE_PHY_8;
2633 else if (maxburst >= 4)
2634 psize = STEDMA40_PSIZE_PHY_4;
2635 else
2636 psize = STEDMA40_PSIZE_PHY_1;
2637 }
2638
2639 info->psize = psize;
2640 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2641
2642 return 0;
2643 }
2644
2645 static int d40_set_runtime_config(struct dma_chan *chan,
2646 struct dma_slave_config *config)
2647 {
2648 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2649
2650 memcpy(&d40c->slave_config, config, sizeof(*config));
2651
2652 return 0;
2653 }
2654
2655 /* Apply the stored slave configuration for the given transfer direction. */
2656 static int d40_set_runtime_config_write(struct dma_chan *chan,
2657 struct dma_slave_config *config,
2658 enum dma_transfer_direction direction)
2659 {
2660 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2661 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2662 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2663 dma_addr_t config_addr;
2664 u32 src_maxburst, dst_maxburst;
2665 int ret;
2666
2667 if (d40c->phy_chan == NULL) {
2668 chan_err(d40c, "Channel is not allocated!\n");
2669 return -EINVAL;
2670 }
2671
2672 src_addr_width = config->src_addr_width;
2673 src_maxburst = config->src_maxburst;
2674 dst_addr_width = config->dst_addr_width;
2675 dst_maxburst = config->dst_maxburst;
2676
2677 if (direction == DMA_DEV_TO_MEM) {
2678 config_addr = config->src_addr;
2679
2680 if (cfg->dir != DMA_DEV_TO_MEM)
2681 dev_dbg(d40c->base->dev,
2682 "channel was not configured for peripheral "
2683 "to memory transfer (%d) overriding\n",
2684 cfg->dir);
2685 cfg->dir = DMA_DEV_TO_MEM;
2686
2687 /* Let the memory side mirror the device side when left unspecified. */
2688 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2689 dst_addr_width = src_addr_width;
2690 if (dst_maxburst == 0)
2691 dst_maxburst = src_maxburst;
2692
2693 } else if (direction == DMA_MEM_TO_DEV) {
2694 config_addr = config->dst_addr;
2695
2696 if (cfg->dir != DMA_MEM_TO_DEV)
2697 dev_dbg(d40c->base->dev,
2698 "channel was not configured for memory "
2699 "to peripheral transfer (%d) overriding\n",
2700 cfg->dir);
2701 cfg->dir = DMA_MEM_TO_DEV;
2702
2703 /* Let the memory side mirror the device side when left unspecified. */
2704 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2705 src_addr_width = dst_addr_width;
2706 if (src_maxburst == 0)
2707 src_maxburst = dst_maxburst;
2708 } else {
2709 dev_err(d40c->base->dev,
2710 "unrecognized channel direction %d\n",
2711 direction);
2712 return -EINVAL;
2713 }
2714
2715 if (config_addr <= 0) {
2716 dev_err(d40c->base->dev, "no address supplied\n");
2717 return -EINVAL;
2718 }
2719
2720 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2721 dev_err(d40c->base->dev,
2722 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2723 src_maxburst,
2724 src_addr_width,
2725 dst_maxburst,
2726 dst_addr_width);
2727 return -EINVAL;
2728 }
2729
2730 if (src_maxburst > 16) {
2731 src_maxburst = 16;
2732 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2733 } else if (dst_maxburst > 16) {
2734 dst_maxburst = 16;
2735 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2736 }
2737
2738 /* Only bus widths of 1, 2, 4 and 8 bytes are valid. */
2739 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2740 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2741 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2742 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2743 !is_power_of_2(src_addr_width) ||
2744 !is_power_of_2(dst_addr_width))
2745 return -EINVAL;
2746
2747 cfg->src_info.data_width = src_addr_width;
2748 cfg->dst_info.data_width = dst_addr_width;
2749
2750 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2751 src_maxburst);
2752 if (ret)
2753 return ret;
2754
2755 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2756 dst_maxburst);
2757 if (ret)
2758 return ret;
2759
2760 /* Translate the configuration into channel register defaults. */
2761 if (chan_is_logical(d40c))
2762 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2763 else
2764 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2765
2766
2767 d40c->runtime_addr = config_addr;
2768 d40c->runtime_direction = direction;
2769 dev_dbg(d40c->base->dev,
2770 "configured channel %s for %s, data width %d/%d, "
2771 "maxburst %d/%d elements, LE, no flow control\n",
2772 dma_chan_name(chan),
2773 (direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2774 src_addr_width, dst_addr_width,
2775 src_maxburst, dst_maxburst);
2776
2777 return 0;
2778 }
2779
2780
2781
2782 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2783 struct d40_chan *chans, int offset,
2784 int num_chans)
2785 {
2786 int i = 0;
2787 struct d40_chan *d40c;
2788
2789 INIT_LIST_HEAD(&dma->channels);
2790
2791 for (i = offset; i < offset + num_chans; i++) {
2792 d40c = &chans[i];
2793 d40c->base = base;
2794 d40c->chan.device = dma;
2795
2796 spin_lock_init(&d40c->lock);
2797
2798 d40c->log_num = D40_PHY_CHAN;
2799
2800 INIT_LIST_HEAD(&d40c->done);
2801 INIT_LIST_HEAD(&d40c->active);
2802 INIT_LIST_HEAD(&d40c->queue);
2803 INIT_LIST_HEAD(&d40c->pending_queue);
2804 INIT_LIST_HEAD(&d40c->client);
2805 INIT_LIST_HEAD(&d40c->prepare_queue);
2806
2807 tasklet_init(&d40c->tasklet, dma_tasklet,
2808 (unsigned long) d40c);
2809
2810 list_add_tail(&d40c->chan.device_node,
2811 &dma->channels);
2812 }
2813 }
2814
2815 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2816 {
2817 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2818 dev->device_prep_slave_sg = d40_prep_slave_sg;
2819 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2820 }
2821
2822 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2823 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2824 dev->directions = BIT(DMA_MEM_TO_MEM);
2825
2826 /* The controller can only access addresses on even 32-bit boundaries,
2827  * hence the 4-byte copy alignment.
2828  */
2829 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2830 }
2831
2832 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2833 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2834
2835 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2836 dev->device_free_chan_resources = d40_free_chan_resources;
2837 dev->device_issue_pending = d40_issue_pending;
2838 dev->device_tx_status = d40_tx_status;
2839 dev->device_config = d40_set_runtime_config;
2840 dev->device_pause = d40_pause;
2841 dev->device_resume = d40_resume;
2842 dev->device_terminate_all = d40_terminate_all;
2843 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2844 dev->dev = base->dev;
2845 }
2846
2847 static int __init d40_dmaengine_init(struct d40_base *base,
2848 int num_reserved_chans)
2849 {
2850 int err;
2851
2852 d40_chan_init(base, &base->dma_slave, base->log_chans,
2853 0, base->num_log_chans);
2854
2855 dma_cap_zero(base->dma_slave.cap_mask);
2856 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2857 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2858
2859 d40_ops_init(base, &base->dma_slave);
2860
2861 err = dmaenginem_async_device_register(&base->dma_slave);
2862
2863 if (err) {
2864 d40_err(base->dev, "Failed to register slave channels\n");
2865 goto exit;
2866 }
2867
2868 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2869 base->num_log_chans, base->num_memcpy_chans);
2870
2871 dma_cap_zero(base->dma_memcpy.cap_mask);
2872 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2873
2874 d40_ops_init(base, &base->dma_memcpy);
2875
2876 err = dmaenginem_async_device_register(&base->dma_memcpy);
2877
2878 if (err) {
2879 d40_err(base->dev,
2880 "Failed to register memcpy only channels\n");
2881 goto exit;
2882 }
2883
2884 d40_chan_init(base, &base->dma_both, base->phy_chans,
2885 0, num_reserved_chans);
2886
2887 dma_cap_zero(base->dma_both.cap_mask);
2888 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2889 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2890 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2891
2892 d40_ops_init(base, &base->dma_both);
2893 err = dmaenginem_async_device_register(&base->dma_both);
2894
2895 if (err) {
2896 d40_err(base->dev,
2897 "Failed to register logical and physical capable channels\n");
2898 goto exit;
2899 }
2900 return 0;
2901 exit:
2902 return err;
2903 }
2904
2905
2906 #ifdef CONFIG_PM_SLEEP
2907 static int dma40_suspend(struct device *dev)
2908 {
2909 struct d40_base *base = dev_get_drvdata(dev);
2910 int ret;
2911
2912 ret = pm_runtime_force_suspend(dev);
2913 if (ret)
2914 return ret;
2915
2916 if (base->lcpa_regulator)
2917 ret = regulator_disable(base->lcpa_regulator);
2918 return ret;
2919 }
2920
2921 static int dma40_resume(struct device *dev)
2922 {
2923 struct d40_base *base = dev_get_drvdata(dev);
2924 int ret = 0;
2925
2926 if (base->lcpa_regulator) {
2927 ret = regulator_enable(base->lcpa_regulator);
2928 if (ret)
2929 return ret;
2930 }
2931
2932 return pm_runtime_force_resume(dev);
2933 }
2934 #endif
2935
2936 #ifdef CONFIG_PM
2937 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2938 u32 *regaddr, int num, bool save)
2939 {
2940 int i;
2941
2942 for (i = 0; i < num; i++) {
2943 void __iomem *addr = baseaddr + regaddr[i];
2944
2945 if (save)
2946 backup[i] = readl_relaxed(addr);
2947 else
2948 writel_relaxed(backup[i], addr);
2949 }
2950 }
2951
2952 static void d40_save_restore_registers(struct d40_base *base, bool save)
2953 {
2954 int i;
2955
2956 /* Save/restore the per-channel registers of all non-reserved channels. */
2957 for (i = 0; i < base->num_phy_chans; i++) {
2958 void __iomem *addr;
2959 int idx;
2960
2961 if (base->phy_res[i].reserved)
2962 continue;
2963
2964 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2965 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2966
2967 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2968 d40_backup_regs_chan,
2969 ARRAY_SIZE(d40_backup_regs_chan),
2970 save);
2971 }
2972
2973 /* Save/restore the global registers. */
2974 dma40_backup(base->virtbase, base->reg_val_backup,
2975 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2976 save);
2977
2978
2979 if (base->gen_dmac.backup)
2980 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2981 base->gen_dmac.backup,
2982 base->gen_dmac.backup_size,
2983 save);
2984 }
2985
2986 static int dma40_runtime_suspend(struct device *dev)
2987 {
2988 struct d40_base *base = dev_get_drvdata(dev);
2989
2990 d40_save_restore_registers(base, true);
2991
2992
2993 if (base->rev != 1)
2994 writel_relaxed(base->gcc_pwr_off_mask,
2995 base->virtbase + D40_DREG_GCC);
2996
2997 return 0;
2998 }
2999
3000 static int dma40_runtime_resume(struct device *dev)
3001 {
3002 struct d40_base *base = dev_get_drvdata(dev);
3003
3004 d40_save_restore_registers(base, false);
3005
3006 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3007 base->virtbase + D40_DREG_GCC);
3008 return 0;
3009 }
3010 #endif
3011
3012 static const struct dev_pm_ops dma40_pm_ops = {
3013 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3014 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3015 dma40_runtime_resume,
3016 NULL)
3017 };
3018
3019
3020
3021 static int __init d40_phy_res_init(struct d40_base *base)
3022 {
3023 int i;
3024 int num_phy_chans_avail = 0;
3025 u32 val[2];
3026 int odd_even_bit = -2;
3027 int gcc = D40_DREG_GCC_ENA;
3028
3029 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3030 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3031
3032 for (i = 0; i < base->num_phy_chans; i++) {
3033 base->phy_res[i].num = i;
3034 odd_even_bit += 2 * ((i % 2) == 0);
3035 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3036 /* Mark secure-only channels as occupied and reserved. */
3037 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3038 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3039 base->phy_res[i].reserved = true;
3040 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3041 D40_DREG_GCC_SRC);
3042 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3043 D40_DREG_GCC_DST);
3044
3045
3046 } else {
3047 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3048 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3049 base->phy_res[i].reserved = false;
3050 num_phy_chans_avail++;
3051 }
3052 spin_lock_init(&base->phy_res[i].lock);
3053 }
3054
3055 /* Mark disabled channels as occupied. */
3056 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3057 int chan = base->plat_data->disabled_channels[i];
3058
3059 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3060 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3061 base->phy_res[chan].reserved = true;
3062 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3063 D40_DREG_GCC_SRC);
3064 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3065 D40_DREG_GCC_DST);
3066 num_phy_chans_avail--;
3067 }
3068
3069 /* Mark channels that should use soft LLIs. */
3070 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3071 int chan = base->plat_data->soft_lli_chans[i];
3072
3073 base->phy_res[chan].use_soft_lli = true;
3074 }
3075
3076 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3077 num_phy_chans_avail, base->num_phy_chans);
3078
3079 /* Check that each free channel is configured as a standard channel. */
3080 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3081
3082 for (i = 0; i < base->num_phy_chans; i++) {
3083
3084 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3085 (val[0] & 0x3) != 1)
3086 dev_info(base->dev,
3087 "[%s] INFO: channel %d is misconfigured (%d)\n",
3088 __func__, i, val[0] & 0x3);
3089
3090 val[0] = val[0] >> 2;
3091 }
3092
3093
3094 /*
3095  * Enable all clocks initially to keep things simple; they are managed
3096  * per channel allocation later. Event groups with reserved channels
3097  * stay enabled via the gcc power-off mask built above.
3098  */
3099 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3100 base->gcc_pwr_off_mask = gcc;
3101
3102 return num_phy_chans_avail;
3103 }
3104
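/*
 * Enable the controller clock, map the register area, verify the PrimeCell
 * IDs and read out the hardware revision and channel counts, then allocate
 * and fill in the struct d40_base describing this controller instance.
 */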
3105 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3106 {
3107 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3108 struct clk *clk;
3109 void __iomem *virtbase;
3110 struct resource *res;
3111 struct d40_base *base;
3112 int num_log_chans;
3113 int num_phy_chans;
3114 int num_memcpy_chans;
3115 int clk_ret = -EINVAL;
3116 int i;
3117 u32 pid;
3118 u32 cid;
3119 u8 rev;
3120
3121 clk = clk_get(&pdev->dev, NULL);
3122 if (IS_ERR(clk)) {
3123 d40_err(&pdev->dev, "No matching clock found\n");
3124 goto check_prepare_enabled;
3125 }
3126
3127 clk_ret = clk_prepare_enable(clk);
3128 if (clk_ret) {
3129 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3130 goto disable_unprepare;
3131 }
3132
3133 /* Get I/O for the DMAC register base area. */
3134 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3135 if (!res)
3136 goto disable_unprepare;
3137
3138 if (request_mem_region(res->start, resource_size(res),
3139 D40_NAME " I/O base") == NULL)
3140 goto release_region;
3141
3142 virtbase = ioremap(res->start, resource_size(res));
3143 if (!virtbase)
3144 goto release_region;
3145
3146 /* Read the PrimeCell peripheral and cell IDs. */
3147 for (pid = 0, i = 0; i < 4; i++)
3148 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3149 & 255) << (i * 8);
3150 for (cid = 0, i = 0; i < 4; i++)
3151 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3152 & 255) << (i * 8);
3153
3154 if (cid != AMBA_CID) {
3155 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3156 goto unmap_io;
3157 }
3158 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3159 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3160 AMBA_MANF_BITS(pid),
3161 AMBA_VENDOR_ST);
3162 goto unmap_io;
3163 }
3164
3165
3166 /*
3167  * The AMBA revision field of the peripheral ID encodes the hardware
3168  * revision; only revision 2 and later of the DMA40 are supported by
3169  * this driver.
3170  */
3171
3172
3173 rev = AMBA_REV_BITS(pid);
3174 if (rev < 2) {
3175 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3176 goto unmap_io;
3177 }
3178
3179 /* The number of physical channels on this hardware. */
3180 if (plat_data->num_of_phy_chans)
3181 num_phy_chans = plat_data->num_of_phy_chans;
3182 else
3183 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3184
3185 /* The number of channels used for memcpy. */
3186 if (plat_data->num_of_memcpy_chans)
3187 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3188 else
3189 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3190
3191 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3192
3193 dev_info(&pdev->dev,
3194 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3195 rev, &res->start, num_phy_chans, num_log_chans);
3196
3197 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3198 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3199 sizeof(struct d40_chan), GFP_KERNEL);
3200
3201 if (base == NULL)
3202 goto unmap_io;
3203
3204 base->rev = rev;
3205 base->clk = clk;
3206 base->num_memcpy_chans = num_memcpy_chans;
3207 base->num_phy_chans = num_phy_chans;
3208 base->num_log_chans = num_log_chans;
3209 base->phy_start = res->start;
3210 base->phy_size = resource_size(res);
3211 base->virtbase = virtbase;
3212 base->plat_data = plat_data;
3213 base->dev = &pdev->dev;
3214 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3215 base->log_chans = &base->phy_chans[num_phy_chans];
3216
3217 if (base->plat_data->num_of_phy_chans == 14) {
3218 base->gen_dmac.backup = d40_backup_regs_v4b;
3219 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3220 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3221 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3222 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3223 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3224 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3225 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3226 base->gen_dmac.il = il_v4b;
3227 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3228 base->gen_dmac.init_reg = dma_init_reg_v4b;
3229 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3230 } else {
3231 if (base->rev >= 3) {
3232 base->gen_dmac.backup = d40_backup_regs_v4a;
3233 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3234 }
3235 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3236 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3237 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3238 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3239 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3240 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3241 base->gen_dmac.il = il_v4a;
3242 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3243 base->gen_dmac.init_reg = dma_init_reg_v4a;
3244 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3245 }
3246
3247 base->phy_res = kcalloc(num_phy_chans,
3248 sizeof(*base->phy_res),
3249 GFP_KERNEL);
3250 if (!base->phy_res)
3251 goto free_base;
3252
3253 base->lookup_phy_chans = kcalloc(num_phy_chans,
3254 sizeof(*base->lookup_phy_chans),
3255 GFP_KERNEL);
3256 if (!base->lookup_phy_chans)
3257 goto free_phy_res;
3258
3259 base->lookup_log_chans = kcalloc(num_log_chans,
3260 sizeof(*base->lookup_log_chans),
3261 GFP_KERNEL);
3262 if (!base->lookup_log_chans)
3263 goto free_phy_chans;
3264
3265 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3266 sizeof(d40_backup_regs_chan),
3267 GFP_KERNEL);
3268 if (!base->reg_val_backup_chan)
3269 goto free_log_chans;
3270
3271 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3272 * D40_LCLA_LINK_PER_EVENT_GRP,
3273 sizeof(*base->lcla_pool.alloc_map),
3274 GFP_KERNEL);
3275 if (!base->lcla_pool.alloc_map)
3276 goto free_backup_chan;
3277
3278 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3279 sizeof(*base->regs_interrupt),
3280 GFP_KERNEL);
3281 if (!base->regs_interrupt)
3282 goto free_map;
3283
3284 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3285 0, SLAB_HWCACHE_ALIGN,
3286 NULL);
3287 if (base->desc_slab == NULL)
3288 goto free_regs;
3289
3290
3291 return base;
3292 free_regs:
3293 kfree(base->regs_interrupt);
3294 free_map:
3295 kfree(base->lcla_pool.alloc_map);
3296 free_backup_chan:
3297 kfree(base->reg_val_backup_chan);
3298 free_log_chans:
3299 kfree(base->lookup_log_chans);
3300 free_phy_chans:
3301 kfree(base->lookup_phy_chans);
3302 free_phy_res:
3303 kfree(base->phy_res);
3304 free_base:
3305 kfree(base);
3306 unmap_io:
3307 iounmap(virtbase);
3308 release_region:
3309 release_mem_region(res->start, resource_size(res));
3310 check_prepare_enabled:
3311 if (!clk_ret)
3312 disable_unprepare:
3313 clk_disable_unprepare(clk);
3314 if (!IS_ERR(clk))
3315 clk_put(clk);
3316 return NULL;
3317 }
3318
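/*
 * Write the per-revision init register table, mark the reserved channels as
 * active, and for the remaining channels select physical mode and
 * enable/clear their interrupts.
 */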
3319 static void __init d40_hw_init(struct d40_base *base)
3320 {
3321
3322 int i;
3323 u32 prmseo[2] = {0, 0};
3324 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3325 u32 pcmis = 0;
3326 u32 pcicr = 0;
3327 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3328 u32 reg_size = base->gen_dmac.init_reg_size;
3329
3330 for (i = 0; i < reg_size; i++)
3331 writel(dma_init_reg[i].val,
3332 base->virtbase + dma_init_reg[i].reg);
3333
3334 /* Set up the default channel state and interrupt masks. */
3335 for (i = 0; i < base->num_phy_chans; i++) {
3336
3337 activeo[i % 2] = activeo[i % 2] << 2;
3338
3339 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3340 == D40_ALLOC_PHY) {
3341 activeo[i % 2] |= 3;
3342 continue;
3343 }
3344
3345 /* Enable the interrupt for this channel. */
3346 pcmis = (pcmis << 1) | 1;
3347
3348 /* Clear any pending interrupt for this channel. */
3349 pcicr = (pcicr << 1) | 1;
3350
3351
3352 prmseo[i % 2] = prmseo[i % 2] << 2;
3353 prmseo[i % 2] |= 1;
3354
3355 }
3356
3357 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3358 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3359 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3360 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3361
3362 /* Enable interrupts for the unreserved channels. */
3363 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3364
3365 /* Clear any pending interrupts. */
3366 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3367
3368
3369 base->gen_dmac.init_reg = NULL;
3370 base->gen_dmac.init_reg_size = 0;
3371 }
3372
3373 static int __init d40_lcla_allocate(struct d40_base *base)
3374 {
3375 struct d40_lcla_pool *pool = &base->lcla_pool;
3376 unsigned long *page_list;
3377 int i, j;
3378 int ret;
3379
3380 /*
3381  * The LCLA area must be aligned to LCLA_ALIGNMENT (256 KiB). Rather
3382  * than over-allocating, keep allocating page blocks until one happens
3383  * to be correctly aligned.
3384  */
3385 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3386 sizeof(*page_list),
3387 GFP_KERNEL);
3388 if (!page_list)
3389 return -ENOMEM;
3390
3391 /* 1 KiB of link memory is needed per physical channel. */
3392 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3393
3394 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3395 page_list[i] = __get_free_pages(GFP_KERNEL,
3396 base->lcla_pool.pages);
3397 if (!page_list[i]) {
3398
3399 d40_err(base->dev, "Failed to allocate %d pages.\n",
3400 base->lcla_pool.pages);
3401 ret = -ENOMEM;
3402 /* Free the pages allocated so far. */
3403 for (j = 0; j < i; j++)
3404 free_pages(page_list[j], base->lcla_pool.pages);
3405 goto free_page_list;
3406 }
3407
3408 if ((virt_to_phys((void *)page_list[i]) &
3409 (LCLA_ALIGNMENT - 1)) == 0)
3410 break;
3411 }
3412
3413 for (j = 0; j < i; j++)
3414 free_pages(page_list[j], base->lcla_pool.pages);
3415
3416 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3417 base->lcla_pool.base = (void *)page_list[i];
3418 } else {
3419
3420 /* No aligned block found; fall back to an over-sized allocation
3421  * that is aligned manually.
3422  */
3423 dev_warn(base->dev,
3424 "[%s] Failed to get %d pages @ 18 bit align.\n",
3425 __func__, base->lcla_pool.pages);
3426 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3427 base->num_phy_chans +
3428 LCLA_ALIGNMENT,
3429 GFP_KERNEL);
3430 if (!base->lcla_pool.base_unaligned) {
3431 ret = -ENOMEM;
3432 goto free_page_list;
3433 }
3434
3435 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3436 LCLA_ALIGNMENT);
3437 }
3438
3439 pool->dma_addr = dma_map_single(base->dev, pool->base,
3440 SZ_1K * base->num_phy_chans,
3441 DMA_TO_DEVICE);
3442 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3443 pool->dma_addr = 0;
3444 ret = -ENOMEM;
3445 goto free_page_list;
3446 }
3447
3448 writel(virt_to_phys(base->lcla_pool.base),
3449 base->virtbase + D40_DREG_LCLA);
3450 ret = 0;
3451 free_page_list:
3452 kfree(page_list);
3453 return ret;
3454 }
3455
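/*
 * Build a stedma40_platform_data structure from the device tree properties
 * "dma-channels", "memcpy-channels" and "disabled-channels".
 */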
3456 static int __init d40_of_probe(struct platform_device *pdev,
3457 struct device_node *np)
3458 {
3459 struct stedma40_platform_data *pdata;
3460 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3461 const __be32 *list;
3462
3463 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3464 if (!pdata)
3465 return -ENOMEM;
3466
3467
3468 of_property_read_u32(np, "dma-channels", &num_phy);
3469 if (num_phy > 0)
3470 pdata->num_of_phy_chans = num_phy;
3471
3472 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3473 num_memcpy /= sizeof(*list);
3474
3475 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3476 d40_err(&pdev->dev,
3477 "Invalid number of memcpy channels specified (%d)\n",
3478 num_memcpy);
3479 return -EINVAL;
3480 }
3481 pdata->num_of_memcpy_chans = num_memcpy;
3482
3483 of_property_read_u32_array(np, "memcpy-channels",
3484 dma40_memcpy_channels,
3485 num_memcpy);
3486
3487 list = of_get_property(np, "disabled-channels", &num_disabled);
3488 num_disabled /= sizeof(*list);
3489
3490 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3491 d40_err(&pdev->dev,
3492 "Invalid number of disabled channels specified (%d)\n",
3493 num_disabled);
3494 return -EINVAL;
3495 }
3496
3497 of_property_read_u32_array(np, "disabled-channels",
3498 pdata->disabled_channels,
3499 num_disabled);
3500 pdata->disabled_channels[num_disabled] = -1;
3501
3502 pdev->dev.platform_data = pdata;
3503
3504 return 0;
3505 }
3506
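/*
 * Main probe: detect the hardware, set up the LCPA/LCLA memory areas,
 * request the interrupt, enable runtime PM and register the three DMA
 * engine devices (slave, memcpy and both).
 */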
3507 static int __init d40_probe(struct platform_device *pdev)
3508 {
3509 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3510 struct device_node *np = pdev->dev.of_node;
3511 int ret = -ENOENT;
3512 struct d40_base *base;
3513 struct resource *res;
3514 int num_reserved_chans;
3515 u32 val;
3516
3517 if (!plat_data) {
3518 if (np) {
3519 if (d40_of_probe(pdev, np)) {
3520 ret = -ENOMEM;
3521 goto report_failure;
3522 }
3523 } else {
3524 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3525 goto report_failure;
3526 }
3527 }
3528
3529 base = d40_hw_detect_init(pdev);
3530 if (!base)
3531 goto report_failure;
3532
3533 num_reserved_chans = d40_phy_res_init(base);
3534
3535 platform_set_drvdata(pdev, base);
3536
3537 spin_lock_init(&base->interrupt_lock);
3538 spin_lock_init(&base->execmd_lock);
3539
3540 /* Get I/O for the logical channel parameter address (LCPA). */
3541 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3542 if (!res) {
3543 ret = -ENOENT;
3544 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3545 goto destroy_cache;
3546 }
3547 base->lcpa_size = resource_size(res);
3548 base->phy_lcpa = res->start;
3549
3550 if (request_mem_region(res->start, resource_size(res),
3551 D40_NAME " I/O lcpa") == NULL) {
3552 ret = -EBUSY;
3553 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3554 goto destroy_cache;
3555 }
3556
3557
3558 val = readl(base->virtbase + D40_DREG_LCPA);
3559 if (res->start != val && val != 0) {
3560 dev_warn(&pdev->dev,
3561 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3562 __func__, val, &res->start);
3563 } else
3564 writel(res->start, base->virtbase + D40_DREG_LCPA);
3565
3566 base->lcpa_base = ioremap(res->start, resource_size(res));
3567 if (!base->lcpa_base) {
3568 ret = -ENOMEM;
3569 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3570 goto destroy_cache;
3571 }
3572
3573 if (base->plat_data->use_esram_lcla) {
3574 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3575 "lcla_esram");
3576 if (!res) {
3577 ret = -ENOENT;
3578 d40_err(&pdev->dev,
3579 "No \"lcla_esram\" memory resource\n");
3580 goto destroy_cache;
3581 }
3582 base->lcla_pool.base = ioremap(res->start,
3583 resource_size(res));
3584 if (!base->lcla_pool.base) {
3585 ret = -ENOMEM;
3586 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3587 goto destroy_cache;
3588 }
3589 writel(res->start, base->virtbase + D40_DREG_LCLA);
3590
3591 } else {
3592 ret = d40_lcla_allocate(base);
3593 if (ret) {
3594 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3595 goto destroy_cache;
3596 }
3597 }
3598
3599 spin_lock_init(&base->lcla_pool.lock);
3600
3601 base->irq = platform_get_irq(pdev, 0);
3602
3603 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3604 if (ret) {
3605 d40_err(&pdev->dev, "No IRQ defined\n");
3606 goto destroy_cache;
3607 }
3608
3609 if (base->plat_data->use_esram_lcla) {
3610
3611 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3612 if (IS_ERR(base->lcpa_regulator)) {
3613 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3614 ret = PTR_ERR(base->lcpa_regulator);
3615 base->lcpa_regulator = NULL;
3616 goto destroy_cache;
3617 }
3618
3619 ret = regulator_enable(base->lcpa_regulator);
3620 if (ret) {
3621 d40_err(&pdev->dev,
3622 "Failed to enable lcpa_regulator\n");
3623 regulator_put(base->lcpa_regulator);
3624 base->lcpa_regulator = NULL;
3625 goto destroy_cache;
3626 }
3627 }
3628
3629 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3630
3631 pm_runtime_irq_safe(base->dev);
3632 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3633 pm_runtime_use_autosuspend(base->dev);
3634 pm_runtime_mark_last_busy(base->dev);
3635 pm_runtime_set_active(base->dev);
3636 pm_runtime_enable(base->dev);
3637
3638 ret = d40_dmaengine_init(base, num_reserved_chans);
3639 if (ret)
3640 goto destroy_cache;
3641
3642 base->dev->dma_parms = &base->dma_parms;
3643 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3644 if (ret) {
3645 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3646 goto destroy_cache;
3647 }
3648
3649 d40_hw_init(base);
3650
3651 if (np) {
3652 ret = of_dma_controller_register(np, d40_xlate, NULL);
3653 if (ret)
3654 dev_err(&pdev->dev,
3655 "could not register of_dma_controller\n");
3656 }
3657
3658 dev_info(base->dev, "initialized\n");
3659 return 0;
3660 destroy_cache:
3661 kmem_cache_destroy(base->desc_slab);
3662 if (base->virtbase)
3663 iounmap(base->virtbase);
3664
3665 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3666 iounmap(base->lcla_pool.base);
3667 base->lcla_pool.base = NULL;
3668 }
3669
3670 if (base->lcla_pool.dma_addr)
3671 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3672 SZ_1K * base->num_phy_chans,
3673 DMA_TO_DEVICE);
3674
3675 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3676 free_pages((unsigned long)base->lcla_pool.base,
3677 base->lcla_pool.pages);
3678
3679 kfree(base->lcla_pool.base_unaligned);
3680
3681 if (base->phy_lcpa)
3682 release_mem_region(base->phy_lcpa,
3683 base->lcpa_size);
3684 if (base->phy_start)
3685 release_mem_region(base->phy_start,
3686 base->phy_size);
3687 if (base->clk) {
3688 clk_disable_unprepare(base->clk);
3689 clk_put(base->clk);
3690 }
3691
3692 if (base->lcpa_regulator) {
3693 regulator_disable(base->lcpa_regulator);
3694 regulator_put(base->lcpa_regulator);
3695 }
3696
3697 kfree(base->lcla_pool.alloc_map);
3698 kfree(base->lookup_log_chans);
3699 kfree(base->lookup_phy_chans);
3700 kfree(base->phy_res);
3701 kfree(base);
3702 report_failure:
3703 d40_err(&pdev->dev, "probe failed\n");
3704 return ret;
3705 }
3706
3707 static const struct of_device_id d40_match[] = {
3708 { .compatible = "stericsson,dma40", },
3709 {}
3710 };
3711
3712 static struct platform_driver d40_driver = {
3713 .driver = {
3714 .name = D40_NAME,
3715 .pm = &dma40_pm_ops,
3716 .of_match_table = d40_match,
3717 },
3718 };
3719
3720 static int __init stedma40_init(void)
3721 {
3722 return platform_driver_probe(&d40_driver, d40_probe);
3723 }
3724 subsys_initcall(stedma40_init);