This source file includes the following definitions.
- hidma_is_chan_enabled
- hidma_ll_free
- hidma_ll_request
- hidma_ll_tre_complete
- hidma_post_completed
- hidma_handle_tre_completion
- hidma_cleanup_pending_tre
- hidma_ll_reset
- hidma_ll_int_handler_internal
- hidma_ll_inthandler
- hidma_ll_inthandler_msi
- hidma_ll_enable
- hidma_ll_start
- hidma_ll_isenabled
- hidma_ll_queue_request
- hidma_ll_disable
- hidma_ll_set_transfer_params
- hidma_ll_setup
- hidma_ll_setup_irq
- hidma_ll_init
- hidma_ll_uninit
- hidma_ll_status
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE				16	/* each EVRE is 16 bytes */

/* transfer channel (TRCA) register offsets */
#define HIDMA_TRCA_CTRLSTS_REG			0x000
#define HIDMA_TRCA_RING_LOW_REG			0x008
#define HIDMA_TRCA_RING_HIGH_REG		0x00C
#define HIDMA_TRCA_RING_LEN_REG			0x010
#define HIDMA_TRCA_DOORBELL_REG			0x400

/* event channel (EVCA) register offsets */
#define HIDMA_EVCA_CTRLSTS_REG			0x000
#define HIDMA_EVCA_INTCTRL_REG			0x004
#define HIDMA_EVCA_RING_LOW_REG			0x008
#define HIDMA_EVCA_RING_HIGH_REG		0x00C
#define HIDMA_EVCA_RING_LEN_REG			0x010
#define HIDMA_EVCA_WRITE_PTR_REG		0x020
#define HIDMA_EVCA_DOORBELL_REG			0x400

#define HIDMA_EVCA_IRQ_STAT_REG			0x100
#define HIDMA_EVCA_IRQ_CLR_REG			0x108
#define HIDMA_EVCA_IRQ_EN_REG			0x110

#define HIDMA_EVRE_CFG_IDX			0

#define HIDMA_EVRE_ERRINFO_BIT_POS		24
#define HIDMA_EVRE_CODE_BIT_POS			28

#define HIDMA_EVRE_ERRINFO_MASK			GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK			GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK			GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK			GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS			0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {								\
	iter += size;						\
	if (iter >= ring_size)					\
		iter -= ring_size;				\
} while (0)

#define HIDMA_CH_STATE(val)	\
	(((val) >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK					\
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   |		\
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |		\
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	    |		\
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    |		\
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};

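/* true if the hardware reports the channel as enabled or running */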
static bool hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

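/*
 * Return a TRE to the free pool by clearing its 'allocated' flag. The
 * TRE must have been handed out by hidma_ll_request() before.
 */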
void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

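/*
 * Reserve a free TRE from the pool for the given signature and remember
 * the completion callback. On success the TRE index is returned through
 * tre_ch. Fails with -ENOMEM when no free TRE is left.
 */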
int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->status = 0;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;
	if (callback)
		callback(data);
	return 0;
}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

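/*
 * Complete the oldest pending TRE: detach it from the pending list,
 * advance the processed offset, record the error code reported by the
 * EVRE, and hand the TRE off to the tasklet for callback delivery.
 */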
static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);
	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}

/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TRE or EVRE were consumed on this run.
 * Return a positive number if there are pending TREs or EVREs.
 * Return 0 if there is nothing to consume or no pending TREs/EVREs found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while ((evre_iterator != evre_write_off)) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt might have arrived while we are
		 * processing this completion interrupt.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}

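/*
 * Flush all pending TREs, completing each one with the given error
 * code. Used when the channel is shut down on an error condition.
 */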
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}

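/*
 * Issue a RESET command to both the transfer and the event channel and
 * poll until the hardware reports the DISABLED state.
 */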
static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVRE from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. The last processed offset is used to match each EVRE with
 * its TRE.
 *
 * An error interrupt, in contrast, leaves the channel unusable: pending
 * TREs are flushed with an error code and the channel stays disabled
 * until it is reset again.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	unsigned long irqflags;

	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n", cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes remaining TREs with error code */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);

		return;
	}

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);

	/*
	 * Try to consume as many EVREs as hardware has delivered. The
	 * write pointer is re-read inside the loop, so events that arrive
	 * while we are working are picked up on the same pass. Hardware
	 * raises another interrupt for anything left unprocessed.
	 */
	hidma_handle_tre_completion(lldev);
}

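/*
 * Interrupt handler for the wire interrupt. Keeps consuming interrupt
 * causes until no enabled cause remains asserted.
 */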
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}

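/*
 * MSI interrupt handler. The cause is passed in by the caller based on
 * the MSI vector, so there is no need to read the status register here.
 */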
irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);
	return IRQ_HANDLED;
}

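/*
 * Enable the event channel first, then the transfer channel, polling
 * each until the hardware reports an operational state.
 */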
int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	/* enable irqs */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}

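/* ring the transfer channel doorbell so hardware processes queued TREs */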
void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

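/*
 * Refresh the cached channel states and report whether both the
 * transfer and the event channel are operational.
 */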
bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled before calling this function */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

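/*
 * Copy a prepared TRE into the next free slot of the transfer ring and
 * account for it in the pending list. The doorbell is not rung here;
 * hidma_ll_start() does that when the caller is ready.
 */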
void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
				   % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}

/*
 * Note that even though we stop this channel, if there is a pending
 * transaction in flight it will complete and follow the callback.
 * This request will prevent further requests from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* The channel needs to be in working state */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return 0;
}

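/*
 * Fill in source, destination, length and transaction type for a TRE
 * that was previously reserved with hidma_ll_request().
 */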
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	return hidma_ll_enable(lldev);
}

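/*
 * Choose between MSI and wire interrupt delivery, then clear and
 * re-enable the interrupt sources.
 */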
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support IRQ by default */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

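/*
 * Allocate the TRE pool, the pending-TRE list, both hardware rings and
 * the handoff FIFO, then program and enable the channel. Returns NULL
 * on any failure; the devm/dmam allocations are released with the
 * device.
 */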
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}

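/*
 * Tear down the channel: kill the completion tasklet, clear the TRE
 * pool and reset the hardware, leaving interrupts masked.
 */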
int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}

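/* map the error code recorded on a TRE to a dmaengine status code */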
enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}