This source file includes the following definitions:
- dma_submit_error
- dma_chan_name
- dma_set_unmap
- dma_set_unmap
- dmaengine_get_unmap_data
- dmaengine_unmap_put
- dma_descriptor_unmap
- txd_lock
- txd_unlock
- txd_chain
- txd_clear_parent
- txd_clear_next
- txd_next
- txd_parent
- txd_lock
- txd_unlock
- txd_chain
- txd_clear_parent
- txd_clear_next
- txd_parent
- txd_next
- dmaengine_slave_config
- is_slave_direction
- dmaengine_prep_slave_single
- dmaengine_prep_slave_sg
- dmaengine_prep_rio_sg
- dmaengine_prep_dma_cyclic
- dmaengine_prep_interleaved_dma
- dmaengine_prep_dma_memset
- dmaengine_prep_dma_memcpy
- dmaengine_terminate_all
- dmaengine_terminate_async
- dmaengine_synchronize
- dmaengine_terminate_sync
- dmaengine_pause
- dmaengine_resume
- dmaengine_tx_status
- dmaengine_submit
- dmaengine_check_align
- is_dma_copy_aligned
- is_dma_xor_aligned
- is_dma_pq_aligned
- is_dma_fill_aligned
- dma_set_maxpq
- dmaf_continue
- dmaf_p_disabled_continue
- dma_dev_has_pq_continue
- dma_dev_to_maxpq
- dma_maxpq
- dmaengine_get_icg
- dmaengine_get_dst_icg
- dmaengine_get_src_icg
- dmaengine_get
- dmaengine_put
- async_dmaengine_get
- async_dmaengine_put
- async_dma_find_channel
- async_tx_ack
- async_tx_clear_ack
- async_tx_test_ack
- __dma_cap_set
- __dma_cap_clear
- __dma_cap_zero
- __dma_has_cap
- dma_async_issue_pending
- dma_async_is_tx_complete
- dma_async_is_complete
- dma_set_tx_state
- dma_find_channel
- dma_sync_wait
- dma_wait_for_async_tx
- dma_issue_pending_all
- __dma_request_channel
- dma_request_slave_channel
- dma_request_chan
- dma_request_chan_by_mask
- dma_release_channel
- dma_get_slave_caps
- dmaengine_desc_set_reuse
- dmaengine_desc_clear_reuse
- dmaengine_desc_test_reuse
- dmaengine_desc_free
- __dma_request_slave_channel_compat
1
2
3
4
5 #ifndef LINUX_DMAENGINE_H
6 #define LINUX_DMAENGINE_H
7
8 #include <linux/device.h>
9 #include <linux/err.h>
10 #include <linux/uio.h>
11 #include <linux/bug.h>
12 #include <linux/scatterlist.h>
13 #include <linux/bitmap.h>
14 #include <linux/types.h>
15 #include <asm/page.h>
16
/* dma_cookie_t - opaque DMA transaction identifier; >= DMA_MIN_COOKIE when valid, < 0 on error */
22 typedef s32 dma_cookie_t;
23 #define DMA_MIN_COOKIE 1
24
25 static inline int dma_submit_error(dma_cookie_t cookie)
26 {
27 return cookie < 0 ? cookie : 0;
28 }
29
/* enum dma_status - state of a DMA transaction: complete, in flight, paused or failed */
37 enum dma_status {
38 DMA_COMPLETE,
39 DMA_IN_PROGRESS,
40 DMA_PAUSED,
41 DMA_ERROR,
42 };
43
/* enum dma_transaction_type - transaction types/capabilities a dma_device may advertise */
50 enum dma_transaction_type {
51 DMA_MEMCPY,
52 DMA_XOR,
53 DMA_PQ,
54 DMA_XOR_VAL,
55 DMA_PQ_VAL,
56 DMA_MEMSET,
57 DMA_MEMSET_SG,
58 DMA_INTERRUPT,
59 DMA_PRIVATE,
60 DMA_ASYNC_TX,
61 DMA_SLAVE,
62 DMA_CYCLIC,
63 DMA_INTERLEAVE,
64
65 DMA_TX_TYPE_END,
66 };
67
/* enum dma_transfer_direction - direction of a slave transfer relative to memory */
75 enum dma_transfer_direction {
76 DMA_MEM_TO_MEM,
77 DMA_MEM_TO_DEV,
78 DMA_DEV_TO_MEM,
79 DMA_DEV_TO_DEV,
80 DMA_TRANS_NONE,
81 };
82
/*
 * Interleaved transfers: a transfer is described as a set of frames, each
 * frame being a set of chunks.
 *
 * struct data_chunk - one contiguous chunk within a frame; icg is the gap
 * (in bytes) to the next chunk, with dst_icg/src_icg as per-side overrides.
 */
121 struct data_chunk {
122 size_t size;
123 size_t icg;
124 size_t dst_icg;
125 size_t src_icg;
126 };
127
/*
 * struct dma_interleaved_template - describes a complete interleaved
 * transfer: start addresses, direction, whether each side increments and
 * honours the per-chunk gaps (src_sgl/dst_sgl), the number of frames (numf)
 * and chunks per frame (frame_size), followed by frame_size data_chunks.
 */
146 struct dma_interleaved_template {
147 dma_addr_t src_start;
148 dma_addr_t dst_start;
149 enum dma_transfer_direction dir;
150 bool src_inc;
151 bool dst_inc;
152 bool src_sgl;
153 bool dst_sgl;
154 size_t numf;
155 size_t frame_size;
156 struct data_chunk sgl[0];
157 };
158
/*
 * enum dma_ctrl_flags - flags passed to the prep routines to augment an
 * operation (interrupt on completion, client acknowledgement, PQ output
 * control, fenced ordering, descriptor reuse, command descriptor).
 */
180 enum dma_ctrl_flags {
181 DMA_PREP_INTERRUPT = (1 << 0),
182 DMA_CTRL_ACK = (1 << 1),
183 DMA_PREP_PQ_DISABLE_P = (1 << 2),
184 DMA_PREP_PQ_DISABLE_Q = (1 << 3),
185 DMA_PREP_CONTINUE = (1 << 4),
186 DMA_PREP_FENCE = (1 << 5),
187 DMA_CTRL_REUSE = (1 << 6),
188 DMA_PREP_CMD = (1 << 7),
189 };
190
191
192
193
194 enum sum_check_bits {
195 SUM_CHECK_P = 0,
196 SUM_CHECK_Q = 1,
197 };
198
/* enum sum_check_flags - result bitmask of XOR/PQ validate operations; a set bit indicates a mismatch */
204 enum sum_check_flags {
205 SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
206 SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
207 };
208
/* dma_cap_mask_t - capability bitmap, one bit per dma_transaction_type */
214 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
215
/* struct dma_chan_percpu - per-CPU channel usage statistics */
222 struct dma_chan_percpu {
223
224 unsigned long memcpy_count;
225 unsigned long bytes_transferred;
226 };
227
/* struct dma_router - DMA request router sitting between a client and its channel */
233 struct dma_router {
234 struct device *dev;
235 void (*route_free)(struct device *dev, void *route_data);
236 };
237
/* struct dma_chan - devices supply DMA channels, clients use them */
253 struct dma_chan {
254 struct dma_device *device;
255 dma_cookie_t cookie;
256 dma_cookie_t completed_cookie;
257
258
259 int chan_id;
260 struct dma_chan_dev *dev;
261
262 struct list_head device_node;
263 struct dma_chan_percpu __percpu *local;
264 int client_count;
265 int table_count;
266
267
268 struct dma_router *router;
269 void *route_data;
270
271 void *private;
272 };
273
/* struct dma_chan_dev - relates a sysfs device node to its backing channel */
281 struct dma_chan_dev {
282 struct dma_chan *chan;
283 struct device device;
284 int dev_id;
285 atomic_t *idr_ref;
286 };
287
/* enum dma_slave_buswidth - width of the slave device's address/data register, in bytes */
292 enum dma_slave_buswidth {
293 DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
294 DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
295 DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
296 DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
297 DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
298 DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
299 DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
300 DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
301 DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
302 };
303
/*
 * struct dma_slave_config - runtime configuration for a slave channel,
 * normally set by the client via dmaengine_slave_config(): FIFO addresses,
 * register widths, maximum burst sizes, optional port window sizes, flow
 * control and the request line (slave_id).
 */
355 struct dma_slave_config {
356 enum dma_transfer_direction direction;
357 phys_addr_t src_addr;
358 phys_addr_t dst_addr;
359 enum dma_slave_buswidth src_addr_width;
360 enum dma_slave_buswidth dst_addr_width;
361 u32 src_maxburst;
362 u32 dst_maxburst;
363 u32 src_port_window_size;
364 u32 dst_port_window_size;
365 bool device_fc;
366 unsigned int slave_id;
367 };
368
/* enum dma_residue_granularity - granularity at which the reported transfer residue is updated */
389 enum dma_residue_granularity {
390 DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
391 DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
392 DMA_RESIDUE_GRANULARITY_BURST = 2,
393 };
394
/* struct dma_slave_caps - capabilities a slave channel exposes to its client (see dma_get_slave_caps()) */
414 struct dma_slave_caps {
415 u32 src_addr_widths;
416 u32 dst_addr_widths;
417 u32 directions;
418 u32 max_burst;
419 bool cmd_pause;
420 bool cmd_resume;
421 bool cmd_terminate;
422 enum dma_residue_granularity residue_granularity;
423 bool descriptor_reuse;
424 };
425
426 static inline const char *dma_chan_name(struct dma_chan *chan)
427 {
428 return dev_name(&chan->dev->device);
429 }
430
431 void dma_chan_cleanup(struct kref *kref);
432
/*
 * dma_filter_fn - called during dma_request_channel() to decide whether a
 * candidate channel suits the caller; return true to take the channel.
 */
444 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
445
446 typedef void (*dma_async_tx_callback)(void *dma_async_param);
447
448 enum dmaengine_tx_result {
449 DMA_TRANS_NOERROR = 0,
450 DMA_TRANS_READ_FAILED,
451 DMA_TRANS_WRITE_FAILED,
452 DMA_TRANS_ABORTED,
453 };
454
455 struct dmaengine_result {
456 enum dmaengine_tx_result result;
457 u32 residue;
458 };
459
460 typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
461 const struct dmaengine_result *result);
462
463 struct dmaengine_unmap_data {
464 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
465 u16 map_cnt;
466 #else
467 u8 map_cnt;
468 #endif
469 u8 to_cnt;
470 u8 from_cnt;
471 u8 bidi_cnt;
472 struct device *dev;
473 struct kref kref;
474 size_t len;
475 dma_addr_t addr[0];
476 };
477
/*
 * struct dma_async_tx_descriptor - asynchronous transaction descriptor
 * returned by the prep routines and submitted with dmaengine_submit().
 */
496 struct dma_async_tx_descriptor {
497 dma_cookie_t cookie;
498 enum dma_ctrl_flags flags;
499 dma_addr_t phys;
500 struct dma_chan *chan;
501 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
502 int (*desc_free)(struct dma_async_tx_descriptor *tx);
503 dma_async_tx_callback callback;
504 dma_async_tx_callback_result callback_result;
505 void *callback_param;
506 struct dmaengine_unmap_data *unmap;
507 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
508 struct dma_async_tx_descriptor *next;
509 struct dma_async_tx_descriptor *parent;
510 spinlock_t lock;
511 #endif
512 };
513
514 #ifdef CONFIG_DMA_ENGINE
515 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
516 struct dmaengine_unmap_data *unmap)
517 {
518 kref_get(&unmap->kref);
519 tx->unmap = unmap;
520 }
521
522 struct dmaengine_unmap_data *
523 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
524 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
525 #else
526 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
527 struct dmaengine_unmap_data *unmap)
528 {
529 }
530 static inline struct dmaengine_unmap_data *
531 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
532 {
533 return NULL;
534 }
535 static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
536 {
537 }
538 #endif
539
540 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
541 {
542 if (tx->unmap) {
543 dmaengine_unmap_put(tx->unmap);
544 tx->unmap = NULL;
545 }
546 }
547
548 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
549 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
550 {
551 }
552 static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
553 {
554 }
555 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
556 {
557 BUG();
558 }
559 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
560 {
561 }
562 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
563 {
564 }
565 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
566 {
567 return NULL;
568 }
569 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
570 {
571 return NULL;
572 }
573
574 #else
575 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
576 {
577 spin_lock_bh(&txd->lock);
578 }
579 static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
580 {
581 spin_unlock_bh(&txd->lock);
582 }
583 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
584 {
585 txd->next = next;
586 next->parent = txd;
587 }
588 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
589 {
590 txd->parent = NULL;
591 }
592 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
593 {
594 txd->next = NULL;
595 }
596 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
597 {
598 return txd->parent;
599 }
600 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
601 {
602 return txd->next;
603 }
604 #endif
605
/* struct dma_tx_state - filled in by device_tx_status(): last completed and last used cookies plus remaining byte count */
615 struct dma_tx_state {
616 dma_cookie_t last;
617 dma_cookie_t used;
618 u32 residue;
619 };
620
/* enum dmaengine_alignment - address/length alignments a device can handle, as log2(bytes) */
625 enum dmaengine_alignment {
626 DMAENGINE_ALIGN_1_BYTE = 0,
627 DMAENGINE_ALIGN_2_BYTES = 1,
628 DMAENGINE_ALIGN_4_BYTES = 2,
629 DMAENGINE_ALIGN_8_BYTES = 3,
630 DMAENGINE_ALIGN_16_BYTES = 4,
631 DMAENGINE_ALIGN_32_BYTES = 5,
632 DMAENGINE_ALIGN_64_BYTES = 6,
633 };
634
/* struct dma_slave_map - platform association of a consumer device/channel name with a filter parameter */
642 struct dma_slave_map {
643 const char *devname;
644 const char *slave;
645 void *param;
646 };
647
/* struct dma_filter - filter function and slave map used for platform-based channel lookup */
655 struct dma_filter {
656 dma_filter_fn fn;
657 int mapcnt;
658 const struct dma_slave_map *map;
659 };
660
/*
 * struct dma_device - describes a DMA controller to the dmaengine core: its
 * channels, capability mask, alignment and transfer limits, and the
 * device_* callbacks that allocate channel resources, prepare descriptors,
 * control transfers and report status.
 */
723 struct dma_device {
724
725 unsigned int chancnt;
726 unsigned int privatecnt;
727 struct list_head channels;
728 struct list_head global_node;
729 struct dma_filter filter;
730 dma_cap_mask_t cap_mask;
731 unsigned short max_xor;
732 unsigned short max_pq;
733 enum dmaengine_alignment copy_align;
734 enum dmaengine_alignment xor_align;
735 enum dmaengine_alignment pq_align;
736 enum dmaengine_alignment fill_align;
737 #define DMA_HAS_PQ_CONTINUE (1 << 15)
738
739 int dev_id;
740 struct device *dev;
741 struct module *owner;
742
743 u32 src_addr_widths;
744 u32 dst_addr_widths;
745 u32 directions;
746 u32 max_burst;
747 bool descriptor_reuse;
748 enum dma_residue_granularity residue_granularity;
749
750 int (*device_alloc_chan_resources)(struct dma_chan *chan);
751 void (*device_free_chan_resources)(struct dma_chan *chan);
752
753 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
754 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
755 size_t len, unsigned long flags);
756 struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
757 struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
758 unsigned int src_cnt, size_t len, unsigned long flags);
759 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
760 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
761 size_t len, enum sum_check_flags *result, unsigned long flags);
762 struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
763 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
764 unsigned int src_cnt, const unsigned char *scf,
765 size_t len, unsigned long flags);
766 struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
767 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
768 unsigned int src_cnt, const unsigned char *scf, size_t len,
769 enum sum_check_flags *pqres, unsigned long flags);
770 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
771 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
772 unsigned long flags);
773 struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
774 struct dma_chan *chan, struct scatterlist *sg,
775 unsigned int nents, int value, unsigned long flags);
776 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
777 struct dma_chan *chan, unsigned long flags);
778
779 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
780 struct dma_chan *chan, struct scatterlist *sgl,
781 unsigned int sg_len, enum dma_transfer_direction direction,
782 unsigned long flags, void *context);
783 struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
784 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
785 size_t period_len, enum dma_transfer_direction direction,
786 unsigned long flags);
787 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
788 struct dma_chan *chan, struct dma_interleaved_template *xt,
789 unsigned long flags);
790 struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
791 struct dma_chan *chan, dma_addr_t dst, u64 data,
792 unsigned long flags);
793
794 int (*device_config)(struct dma_chan *chan,
795 struct dma_slave_config *config);
796 int (*device_pause)(struct dma_chan *chan);
797 int (*device_resume)(struct dma_chan *chan);
798 int (*device_terminate_all)(struct dma_chan *chan);
799 void (*device_synchronize)(struct dma_chan *chan);
800
801 enum dma_status (*device_tx_status)(struct dma_chan *chan,
802 dma_cookie_t cookie,
803 struct dma_tx_state *txstate);
804 void (*device_issue_pending)(struct dma_chan *chan);
805 };
806
807 static inline int dmaengine_slave_config(struct dma_chan *chan,
808 struct dma_slave_config *config)
809 {
810 if (chan->device->device_config)
811 return chan->device->device_config(chan, config);
812
813 return -ENOSYS;
814 }
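
/*
 * Usage sketch (editorial, not part of this header): configure a channel for
 * memory-to-device transfers into a peripheral FIFO before preparing any
 * descriptors. The dmaengine_example_*() names and the parameter values are
 * illustrative; a real client takes the FIFO address and burst size from its
 * own resources. The transfer direction itself is given later, at prep time.
 */
static inline int dmaengine_example_config_tx(struct dma_chan *chan,
                                              phys_addr_t fifo_phys)
{
        struct dma_slave_config cfg = {
                .dst_addr = fifo_phys,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst = 8,
        };

        /* Returns -ENOSYS if the driver does not implement device_config. */
        return dmaengine_slave_config(chan, &cfg);
}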
815
816 static inline bool is_slave_direction(enum dma_transfer_direction direction)
817 {
818 return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
819 }
820
821 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
822 struct dma_chan *chan, dma_addr_t buf, size_t len,
823 enum dma_transfer_direction dir, unsigned long flags)
824 {
825 struct scatterlist sg;
826 sg_init_table(&sg, 1);
827 sg_dma_address(&sg) = buf;
828 sg_dma_len(&sg) = len;
829
830 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
831 return NULL;
832
833 return chan->device->device_prep_slave_sg(chan, &sg, 1,
834 dir, flags, NULL);
835 }
836
837 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
838 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
839 enum dma_transfer_direction dir, unsigned long flags)
840 {
841 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
842 return NULL;
843
844 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
845 dir, flags, NULL);
846 }
847
848 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
849 struct rio_dma_ext;
850 static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
851 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
852 enum dma_transfer_direction dir, unsigned long flags,
853 struct rio_dma_ext *rio_ext)
854 {
855 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
856 return NULL;
857
858 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
859 dir, flags, rio_ext);
860 }
861 #endif
862
863 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
864 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
865 size_t period_len, enum dma_transfer_direction dir,
866 unsigned long flags)
867 {
868 if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
869 return NULL;
870
871 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
872 period_len, dir, flags);
873 }
874
875 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
876 struct dma_chan *chan, struct dma_interleaved_template *xt,
877 unsigned long flags)
878 {
879 if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
880 return NULL;
881
882 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
883 }
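
/*
 * Usage sketch (editorial, not part of this header): describe a simple 2D
 * copy - 'lines' rows of 'line_len' bytes, with 'src_gap' bytes skipped
 * after every row on the source side only - in a caller-allocated template
 * that has room for one data_chunk. All names are illustrative.
 */
static inline struct dma_async_tx_descriptor *dmaengine_example_prep_2d(
                struct dma_chan *chan, struct dma_interleaved_template *xt,
                dma_addr_t dst, dma_addr_t src,
                size_t line_len, size_t src_gap, size_t lines)
{
        xt->src_start = src;
        xt->dst_start = dst;
        xt->dir = DMA_MEM_TO_MEM;
        xt->src_inc = true;
        xt->dst_inc = true;
        xt->src_sgl = true;     /* honour the inter-chunk gap on the source */
        xt->dst_sgl = false;    /* destination rows are contiguous */
        xt->numf = lines;       /* one frame per row */
        xt->frame_size = 1;     /* one chunk per frame */
        xt->sgl[0].size = line_len;
        xt->sgl[0].icg = src_gap;
        xt->sgl[0].dst_icg = 0;
        xt->sgl[0].src_icg = 0;

        return dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
}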
884
885 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
886 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
887 unsigned long flags)
888 {
889 if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
890 return NULL;
891
892 return chan->device->device_prep_dma_memset(chan, dest, value,
893 len, flags);
894 }
895
896 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
897 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
898 size_t len, unsigned long flags)
899 {
900 if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
901 return NULL;
902
903 return chan->device->device_prep_dma_memcpy(chan, dest, src,
904 len, flags);
905 }
906
/* dmaengine_terminate_all() - deprecated; use dmaengine_terminate_async() or dmaengine_terminate_sync() instead */
914 static inline int dmaengine_terminate_all(struct dma_chan *chan)
915 {
916 if (chan->device->device_terminate_all)
917 return chan->device->device_terminate_all(chan);
918
919 return -ENOSYS;
920 }
921
/*
 * dmaengine_terminate_async() - abort all pending and in-flight transfers on
 * the channel without waiting; completion callbacks may still be running, so
 * memory they access must not be freed until dmaengine_synchronize() has been
 * called. Safe from atomic context and from within a completion callback.
 */
943 static inline int dmaengine_terminate_async(struct dma_chan *chan)
944 {
945 if (chan->device->device_terminate_all)
946 return chan->device->device_terminate_all(chan);
947
948 return -EINVAL;
949 }
950
/*
 * dmaengine_synchronize() - wait until all outstanding transfers and their
 * completion callbacks have finished after dmaengine_terminate_async(). Must
 * be called from non-atomic context.
 */
969 static inline void dmaengine_synchronize(struct dma_chan *chan)
970 {
971 might_sleep();
972
973 if (chan->device->device_synchronize)
974 chan->device->device_synchronize(chan);
975 }
976
/*
 * dmaengine_terminate_sync() - terminate all transfers and wait for any
 * running completion callbacks; equivalent to dmaengine_terminate_async()
 * followed by dmaengine_synchronize(). Must not be called from atomic
 * context.
 */
991 static inline int dmaengine_terminate_sync(struct dma_chan *chan)
992 {
993 int ret;
994
995 ret = dmaengine_terminate_async(chan);
996 if (ret)
997 return ret;
998
999 dmaengine_synchronize(chan);
1000
1001 return 0;
1002 }
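
/*
 * Usage sketch (editorial, not part of this header): tear a channel down from
 * process context. dmaengine_terminate_sync() aborts outstanding descriptors
 * and waits for running completion callbacks, so memory those callbacks touch
 * may only be freed after it returns. The helper name is illustrative.
 */
static inline int dmaengine_example_stop(struct dma_chan *chan)
{
        int ret;

        ret = dmaengine_terminate_sync(chan);
        if (ret)
                return ret;     /* e.g. -EINVAL if termination is unsupported */

        /* Safe to unmap/free the buffers used by in-flight descriptors now. */
        return 0;
}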
1003
1004 static inline int dmaengine_pause(struct dma_chan *chan)
1005 {
1006 if (chan->device->device_pause)
1007 return chan->device->device_pause(chan);
1008
1009 return -ENOSYS;
1010 }
1011
1012 static inline int dmaengine_resume(struct dma_chan *chan)
1013 {
1014 if (chan->device->device_resume)
1015 return chan->device->device_resume(chan);
1016
1017 return -ENOSYS;
1018 }
1019
1020 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
1021 dma_cookie_t cookie, struct dma_tx_state *state)
1022 {
1023 return chan->device->device_tx_status(chan, cookie, state);
1024 }
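
/*
 * Usage sketch (editorial, not part of this header): ask how many bytes of a
 * submitted transfer are still outstanding. How exact 'residue' is depends on
 * the controller's residue_granularity capability. The helper name is
 * illustrative.
 */
static inline u32 dmaengine_example_bytes_left(struct dma_chan *chan,
                                               dma_cookie_t cookie)
{
        struct dma_tx_state state;

        if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
                return 0;

        return state.residue;
}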
1025
1026 static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
1027 {
1028 return desc->tx_submit(desc);
1029 }
1030
1031 static inline bool dmaengine_check_align(enum dmaengine_alignment align,
1032 size_t off1, size_t off2, size_t len)
1033 {
1034 size_t mask;
1035
1036 if (!align)
1037 return true;
1038 mask = (1 << align) - 1;
1039 if (mask & (off1 | off2 | len))
1040 return false;
1041 return true;
1042 }
1043
1044 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
1045 size_t off2, size_t len)
1046 {
1047 return dmaengine_check_align(dev->copy_align, off1, off2, len);
1048 }
1049
1050 static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
1051 size_t off2, size_t len)
1052 {
1053 return dmaengine_check_align(dev->xor_align, off1, off2, len);
1054 }
1055
1056 static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
1057 size_t off2, size_t len)
1058 {
1059 return dmaengine_check_align(dev->pq_align, off1, off2, len);
1060 }
1061
1062 static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
1063 size_t off2, size_t len)
1064 {
1065 return dmaengine_check_align(dev->fill_align, off1, off2, len);
1066 }
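
/*
 * Usage sketch (editorial, not part of this header): off1 and off2 are the
 * source and destination offsets of the intended operation. A client would
 * typically fall back to a CPU copy when this returns false. The helper name
 * and parameters are illustrative.
 */
static inline bool dmaengine_example_can_offload_copy(struct dma_chan *chan,
                                                      size_t src_off,
                                                      size_t dst_off,
                                                      size_t len)
{
        return is_dma_copy_aligned(chan->device, src_off, dst_off, len);
}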
1067
1068 static inline void
1069 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
1070 {
1071 dma->max_pq = maxpq;
1072 if (has_pq_continue)
1073 dma->max_pq |= DMA_HAS_PQ_CONTINUE;
1074 }
1075
1076 static inline bool dmaf_continue(enum dma_ctrl_flags flags)
1077 {
1078 return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
1079 }
1080
1081 static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
1082 {
1083 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
1084
1085 return (flags & mask) == mask;
1086 }
1087
1088 static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
1089 {
1090 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
1091 }
1092
1093 static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
1094 {
1095 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
1096 }
1097
/*
 * dma_maxpq() - maximum number of PQ sources the device can handle for the
 * given flags, accounting for the implicit sources consumed when continuing
 * a P+Q computation on a device without native continuation support.
 */
1111 static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
1112 {
1113 if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
1114 return dma_dev_to_maxpq(dma);
1115 else if (dmaf_p_disabled_continue(flags))
1116 return dma_dev_to_maxpq(dma) - 1;
1117 else if (dmaf_continue(flags))
1118 return dma_dev_to_maxpq(dma) - 3;
1119 BUG();
1120 }
1121
1122 static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
1123 size_t dir_icg)
1124 {
1125 if (inc) {
1126 if (dir_icg)
1127 return dir_icg;
1128 else if (sgl)
1129 return icg;
1130 }
1131
1132 return 0;
1133 }
1134
1135 static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
1136 struct data_chunk *chunk)
1137 {
1138 return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
1139 chunk->icg, chunk->dst_icg);
1140 }
1141
1142 static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
1143 struct data_chunk *chunk)
1144 {
1145 return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
1146 chunk->icg, chunk->src_icg);
1147 }
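
/*
 * Usage sketch (editorial, not part of this header): how a controller
 * driver's prep_interleaved_dma() implementation might compute the distance
 * its source cursor advances over one frame, using the icg helpers above.
 * The helper name is illustrative.
 */
static inline size_t dmaengine_example_src_frame_span(
                struct dma_interleaved_template *xt)
{
        size_t i, span = 0;

        for (i = 0; i < xt->frame_size; i++) {
                struct data_chunk *chunk = &xt->sgl[i];

                span += chunk->size + dmaengine_get_src_icg(xt, chunk);
        }

        return span;
}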
1148
/* --- public DMA engine API --- */
1151 #ifdef CONFIG_DMA_ENGINE
1152 void dmaengine_get(void);
1153 void dmaengine_put(void);
1154 #else
1155 static inline void dmaengine_get(void)
1156 {
1157 }
1158 static inline void dmaengine_put(void)
1159 {
1160 }
1161 #endif
1162
1163 #ifdef CONFIG_ASYNC_TX_DMA
1164 #define async_dmaengine_get() dmaengine_get()
1165 #define async_dmaengine_put() dmaengine_put()
1166 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1167 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
1168 #else
1169 #define async_dma_find_channel(type) dma_find_channel(type)
1170 #endif
1171 #else
1172 static inline void async_dmaengine_get(void)
1173 {
1174 }
1175 static inline void async_dmaengine_put(void)
1176 {
1177 }
1178 static inline struct dma_chan *
1179 async_dma_find_channel(enum dma_transaction_type type)
1180 {
1181 return NULL;
1182 }
1183 #endif
1184 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1185 struct dma_chan *chan);
1186
1187 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
1188 {
1189 tx->flags |= DMA_CTRL_ACK;
1190 }
1191
1192 static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
1193 {
1194 tx->flags &= ~DMA_CTRL_ACK;
1195 }
1196
1197 static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
1198 {
1199 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
1200 }
1201
1202 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
1203 static inline void
1204 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
1205 {
1206 set_bit(tx_type, dstp->bits);
1207 }
1208
1209 #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
1210 static inline void
1211 __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
1212 {
1213 clear_bit(tx_type, dstp->bits);
1214 }
1215
1216 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
1217 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
1218 {
1219 bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
1220 }
1221
1222 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
1223 static inline int
1224 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
1225 {
1226 return test_bit(tx_type, srcp->bits);
1227 }
1228
1229 #define for_each_dma_cap_mask(cap, mask) \
1230 for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
1231
/* dma_async_issue_pending() - flush all pending descriptors on the channel to hardware */
1239 static inline void dma_async_issue_pending(struct dma_chan *chan)
1240 {
1241 chan->device->device_issue_pending(chan);
1242 }
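
/*
 * Usage sketch (editorial, not part of this header): the usual slave-DMA
 * submission sequence - prepare, attach a completion callback, submit, then
 * flush the pending queue. 'buf' must already be a DMA address (e.g. from
 * dma_map_single()); 'done' and the helper name are illustrative.
 */
static inline int dmaengine_example_start_tx(struct dma_chan *chan,
                                             dma_addr_t buf, size_t len,
                                             dma_async_tx_callback done,
                                             void *done_arg)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        desc->callback = done;
        desc->callback_param = done_arg;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return cookie;

        /* Nothing is started until the pending queue is flushed to hardware. */
        dma_async_issue_pending(chan);
        return 0;
}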
1243
/* dma_async_is_tx_complete() - poll a cookie's status, optionally returning the last completed and last used cookies */
1255 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
1256 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
1257 {
1258 struct dma_tx_state state;
1259 enum dma_status status;
1260
1261 status = chan->device->device_tx_status(chan, cookie, &state);
1262 if (last)
1263 *last = state.last;
1264 if (used)
1265 *used = state.used;
1266 return status;
1267 }
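
/*
 * Usage sketch (editorial, not part of this header): non-blocking check of a
 * cookie, also retrieving the last completed and last issued cookies. The
 * helper name is illustrative.
 */
static inline bool dmaengine_example_copy_done(struct dma_chan *chan,
                                               dma_cookie_t cookie)
{
        dma_cookie_t last, used;

        return dma_async_is_tx_complete(chan, cookie, &last, &used) ==
               DMA_COMPLETE;
}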
1268
/* dma_async_is_complete() - test a cookie against the channel's completed/used cookies, accounting for cookie wraparound */
1278 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
1279 dma_cookie_t last_complete, dma_cookie_t last_used)
1280 {
1281 if (last_complete <= last_used) {
1282 if ((cookie <= last_complete) || (cookie > last_used))
1283 return DMA_COMPLETE;
1284 } else {
1285 if ((cookie <= last_complete) && (cookie > last_used))
1286 return DMA_COMPLETE;
1287 }
1288 return DMA_IN_PROGRESS;
1289 }
1290
1291 static inline void
1292 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
1293 {
1294 if (st) {
1295 st->last = last;
1296 st->used = used;
1297 st->residue = residue;
1298 }
1299 }
1300
1301 #ifdef CONFIG_DMA_ENGINE
1302 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
1303 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
1304 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
1305 void dma_issue_pending_all(void);
1306 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1307 dma_filter_fn fn, void *fn_param,
1308 struct device_node *np);
1309 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
1310
1311 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
1312 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
1313
1314 void dma_release_channel(struct dma_chan *chan);
1315 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
1316 #else
1317 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
1318 {
1319 return NULL;
1320 }
1321 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
1322 {
1323 return DMA_COMPLETE;
1324 }
1325 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1326 {
1327 return DMA_COMPLETE;
1328 }
1329 static inline void dma_issue_pending_all(void)
1330 {
1331 }
1332 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1333 dma_filter_fn fn,
1334 void *fn_param,
1335 struct device_node *np)
1336 {
1337 return NULL;
1338 }
1339 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
1340 const char *name)
1341 {
1342 return NULL;
1343 }
1344 static inline struct dma_chan *dma_request_chan(struct device *dev,
1345 const char *name)
1346 {
1347 return ERR_PTR(-ENODEV);
1348 }
1349 static inline struct dma_chan *dma_request_chan_by_mask(
1350 const dma_cap_mask_t *mask)
1351 {
1352 return ERR_PTR(-ENODEV);
1353 }
1354 static inline void dma_release_channel(struct dma_chan *chan)
1355 {
1356 }
1357 static inline int dma_get_slave_caps(struct dma_chan *chan,
1358 struct dma_slave_caps *caps)
1359 {
1360 return -ENXIO;
1361 }
1362 #endif
1363
1364 #define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
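
/*
 * Usage sketch (editorial, not part of this header): end-to-end memcpy
 * offload - request any channel with the DMA_MEMCPY capability, run one copy
 * synchronously via dma_sync_wait(), then release the channel. 'dst' and
 * 'src' are DMA addresses mapped by the caller; the helper name is
 * illustrative and error handling is deliberately minimal.
 */
static inline int dmaengine_example_copy_sync(dma_addr_t dst, dma_addr_t src,
                                              size_t len)
{
        struct dma_async_tx_descriptor *desc;
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        int ret = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_chan_by_mask(&mask);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
        if (!desc) {
                ret = -ENOMEM;
                goto out;
        }

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret)
                goto out;

        dma_async_issue_pending(chan);
        if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
                ret = -EIO;
out:
        dma_release_channel(chan);
        return ret;
}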
1365
1366 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1367 {
1368 struct dma_slave_caps caps;
1369 int ret;
1370
1371 ret = dma_get_slave_caps(tx->chan, &caps);
1372 if (ret)
1373 return ret;
1374
1375 if (caps.descriptor_reuse) {
1376 tx->flags |= DMA_CTRL_REUSE;
1377 return 0;
1378 } else {
1379 return -EPERM;
1380 }
1381 }
1382
1383 static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
1384 {
1385 tx->flags &= ~DMA_CTRL_REUSE;
1386 }
1387
1388 static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
1389 {
1390 return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
1391 }
1392
1393 static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
1394 {
1395
1396 if (dmaengine_desc_test_reuse(desc))
1397 return desc->desc_free(desc);
1398 else
1399 return -EPERM;
1400 }
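
/*
 * Usage sketch (editorial, not part of this header): resubmit a descriptor
 * that was previously marked reusable with dmaengine_desc_set_reuse(), e.g.
 * from its completion callback, instead of preparing a fresh one each time.
 * The helper name is illustrative.
 */
static inline int dmaengine_example_resubmit(struct dma_async_tx_descriptor *desc)
{
        dma_cookie_t cookie;

        if (!dmaengine_desc_test_reuse(desc))
                return -EPERM;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return cookie;

        dma_async_issue_pending(desc->chan);
        return 0;
}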
1401
/* --- DMA device --- */
1404 int dma_async_device_register(struct dma_device *device);
1405 int dmaenginem_async_device_register(struct dma_device *device);
1406 void dma_async_device_unregister(struct dma_device *device);
1407 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
1408 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
1409 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
1410 #define dma_request_channel(mask, x, y) \
1411 __dma_request_channel(&(mask), x, y, NULL)
1412 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
1413 __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
1414
1415 static inline struct dma_chan
1416 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
1417 dma_filter_fn fn, void *fn_param,
1418 struct device *dev, const char *name)
1419 {
1420 struct dma_chan *chan;
1421
1422 chan = dma_request_slave_channel(dev, name);
1423 if (chan)
1424 return chan;
1425
1426 if (!fn || !fn_param)
1427 return NULL;
1428
1429 return __dma_request_channel(mask, fn, fn_param, NULL);
1430 }
1431 #endif