This source file includes the following definitions:
- vsp1_dl_body_pool_create
- vsp1_dl_body_pool_destroy
- vsp1_dl_body_get
- vsp1_dl_body_put
- vsp1_dl_body_write
- vsp1_dl_cmd_pool_create
- vsp1_dl_ext_cmd_get
- vsp1_dl_ext_cmd_put
- vsp1_dl_ext_cmd_pool_destroy
- vsp1_dl_get_pre_cmd
- vsp1_dl_list_alloc
- vsp1_dl_list_bodies_put
- vsp1_dl_list_free
- vsp1_dl_list_get
- __vsp1_dl_list_put
- vsp1_dl_list_put
- vsp1_dl_list_get_body0
- vsp1_dl_list_add_body
- vsp1_dl_list_add_chain
- vsp1_dl_ext_cmd_fill_header
- vsp1_dl_list_fill_header
- vsp1_dl_list_hw_update_pending
- vsp1_dl_list_hw_enqueue
- vsp1_dl_list_commit_continuous
- vsp1_dl_list_commit_singleshot
- vsp1_dl_list_commit
- vsp1_dlm_irq_frame_end
- vsp1_dlm_setup
- vsp1_dlm_reset
- vsp1_dlm_dl_body_get
- vsp1_dlm_create
- vsp1_dlm_destroy
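
For orientation, here is a minimal sketch of how these entry points fit together, inferred from the definitions below. It assumes driver context (a valid struct vsp1_device *vsp1), elides error handling, and the register address, value, and pool sizes are placeholder choices:

        struct vsp1_dl_manager *dlm;
        struct vsp1_dl_list *dl;
        unsigned int completion;

        dlm = vsp1_dlm_create(vsp1, 0, 8);      /* WPF 0, 8 preallocated lists */
        dl = vsp1_dl_list_get(dlm);             /* may return NULL if exhausted */
        vsp1_dl_body_write(vsp1_dl_list_get_body0(dl), reg, value);
        vsp1_dl_list_commit(dl, 0);             /* hand the list to the hardware */

        /* Later, from the frame end interrupt handler: */
        completion = vsp1_dlm_irq_frame_end(dlm);

        vsp1_dlm_destroy(dlm);

A committed list is recycled by the manager once the hardware is done with it; an unused list would instead be returned with vsp1_dl_list_put().
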
/*
 * vsp1_dl.c -- R-Car VSP1 display list management
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC	(1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC	(1 << 8)

struct vsp1_dl_header_list {
        u32 num_bytes;
        u32 addr;
} __packed;

struct vsp1_dl_header {
        u32 num_lists;
        struct vsp1_dl_header_list lists[8];
        u32 next_header;
        u32 flags;
} __packed;

/*
 * Extended display list header, appended to the standard header on devices
 * that support extended display lists. It points to the pre and post command
 * lists executed around the main display list body.
 */
struct vsp1_dl_ext_header {
        u32 padding;

        /*
         * The flags field applies to the whole extended header, not only to
         * the pre command list: it carries both the pre and post command
         * execution enable bits (VSP1_DLH_EXT_*_CMD_EXEC).
         */
        u16 pre_ext_dl_num_cmd;
        u16 flags;
        u32 pre_ext_dl_plist;

        u32 post_ext_dl_num_cmd;
        u32 post_ext_dl_plist;
} __packed;

struct vsp1_dl_header_extended {
        struct vsp1_dl_header header;
        struct vsp1_dl_ext_header ext;
} __packed;

/* A single register write: address and value, as consumed by the hardware. */
struct vsp1_dl_entry {
        u32 addr;
        u32 data;
} __packed;

/*
 * Pre-extended display list command body: operation code, command-specific
 * flags, and the DMA address of the data block the command operates on.
 */
struct vsp1_pre_ext_dl_body {
        u32 opcode;
        u32 flags;
        u32 address_set;
        u32 reserved;
} __packed;

/*
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @free: entry in the body pool's free list
 * @refcnt: reference count, the body returns to its pool when it drops to zero
 * @pool: pool this body was allocated from
 * @entries: array of register write entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: maximum number of entries the body can store
 */
struct vsp1_dl_body {
        struct list_head list;
        struct list_head free;

        refcount_t refcnt;

        struct vsp1_dl_body_pool *pool;

        struct vsp1_dl_entry *entries;
        dma_addr_t dma;
        size_t size;

        unsigned int num_entries;
        unsigned int max_entries;
};

/*
 * struct vsp1_dl_body_pool - Display list body pool
 * @dma: DMA address of the pool memory
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: array of body structures backed by the pool
 * @free: list of free bodies
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
        /* DMA allocation */
        dma_addr_t dma;
        size_t size;
        void *mem;

        /* Body management */
        struct vsp1_dl_body *bodies;
        struct list_head free;
        spinlock_t lock;

        struct vsp1_device *vsp1;
};

/*
 * struct vsp1_dl_cmd_pool - Display list extended command pool
 * @dma: DMA address of the pool memory
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: array of command structures backed by the pool
 * @free: list of free commands
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
        /* DMA allocation */
        dma_addr_t dma;
        size_t size;
        void *mem;

        struct vsp1_dl_ext_cmd *cmds;
        struct list_head free;

        spinlock_t lock;

        struct vsp1_device *vsp1;
};

/*
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header, NULL on devices without extended
 *	display list support
 * @dma: DMA address of the header
 * @body0: default display list body, permanently attached to the list
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through the extended header
 * @post_cmd: post command to be issued through the extended header
 * @has_chain: true if other lists are chained to this one
 * @chain: entry in the chain list of the parent list
 * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
 */
struct vsp1_dl_list {
        struct list_head list;
        struct vsp1_dl_manager *dlm;

        struct vsp1_dl_header *header;
        struct vsp1_dl_ext_header *extension;
        dma_addr_t dma;

        struct vsp1_dl_body *body0;
        struct list_head bodies;

        struct vsp1_dl_ext_cmd *pre_cmd;
        struct vsp1_dl_ext_cmd *post_cmd;

        bool has_chain;
        struct list_head chain;

        unsigned int flags;
};

/*
 * struct vsp1_dl_manager - Display list manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued and pending lists
 * @free: list of free display lists
 * @active: list currently being processed by the hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: command pool for extended display lists
 */
struct vsp1_dl_manager {
        unsigned int index;
        bool singleshot;
        struct vsp1_device *vsp1;

        spinlock_t lock;
        struct list_head free;
        struct vsp1_dl_list *active;
        struct vsp1_dl_list *queued;
        struct vsp1_dl_list *pending;

        struct vsp1_dl_body_pool *pool;
        struct vsp1_dl_cmd_pool *cmdpool;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * vsp1_dl_body_pool_create - Create a body pool backed by a single DMA
 * allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries a body can contain
 * @extra_size: Extra allocation provided for each body, in bytes
 *
 * Allocate a pool of display list bodies, each with enough memory to contain
 * the requested number of entries plus the extra size.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
                         unsigned int num_entries, size_t extra_size)
{
        struct vsp1_dl_body_pool *pool;
        size_t dlb_size;
        unsigned int i;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        pool->vsp1 = vsp1;

        /*
         * Reserve num_entries register write entries plus extra_size bytes
         * for each body. The display list manager uses the extra space to
         * store the display list header right after the entries.
         */
        dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
        pool->size = dlb_size * num_bodies;

        pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
        if (!pool->bodies) {
                kfree(pool);
                return NULL;
        }

        pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
                                 GFP_KERNEL);
        if (!pool->mem) {
                kfree(pool->bodies);
                kfree(pool);
                return NULL;
        }

        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free);

        for (i = 0; i < num_bodies; ++i) {
                struct vsp1_dl_body *dlb = &pool->bodies[i];

                dlb->pool = pool;
                dlb->max_entries = num_entries;

                dlb->dma = pool->dma + i * dlb_size;
                dlb->entries = pool->mem + i * dlb_size;

                list_add_tail(&dlb->free, &pool->free);
        }

        return pool;
}

/*
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: The body pool
 *
 * Release all resources allocated for the pool and free the pool itself.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
        if (!pool)
                return;

        if (pool->mem)
                dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
                            pool->dma);

        kfree(pool->bodies);
        kfree(pool);
}

/*
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Return a body or NULL if the pool is empty.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
        struct vsp1_dl_body *dlb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);

        if (!list_empty(&pool->free)) {
                dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
                list_del(&dlb->free);
                refcount_set(&dlb->refcnt, 1);
        }

        spin_unlock_irqrestore(&pool->lock, flags);

        return dlb;
}

/*
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: The body to be returned
 *
 * Return the body to its pool when the reference count drops to zero.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
        unsigned long flags;

        if (!dlb)
                return;

        if (!refcount_dec_and_test(&dlb->refcnt))
                return;

        dlb->num_entries = 0;

        spin_lock_irqsave(&dlb->pool->lock, flags);
        list_add_tail(&dlb->free, &dlb->pool->free);
        spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/*
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Append the given register and value to the display list body. The maximum
 * number of entries a body can hold is fixed when the pool is created.
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
        if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
                      "DLB size exceeded (max %u)", dlb->max_entries))
                return;

        dlb->entries[dlb->num_entries].addr = reg;
        dlb->entries[dlb->num_entries].data = data;
        dlb->num_entries++;
}

/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
        VSP1_EXTCMD_AUTODISP,
        VSP1_EXTCMD_AUTOFLD,
};

/*
 * struct vsp1_extended_command_info - Static description of an extended
 * command type
 * @opcode: the operation code of the command
 * @body_size: the size of the command's data body, in bytes
 */
struct vsp1_extended_command_info {
        u16 opcode;
        size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
        [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
        [VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
};

/*
 * vsp1_dl_cmd_pool_create - Create a command pool backed by a single DMA
 * allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands, each with enough memory to contain the command
 * descriptor followed by the command-specific data body.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
                        unsigned int num_cmds)
{
        struct vsp1_dl_cmd_pool *pool;
        unsigned int i;
        size_t cmd_size;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        /* Record the device, vsp1_dl_ext_cmd_pool_destroy() needs it. */
        pool->vsp1 = vsp1;

        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free);

        pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
        if (!pool->cmds) {
                kfree(pool);
                return NULL;
        }

        cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
                   vsp1_extended_commands[type].body_size;
        cmd_size = ALIGN(cmd_size, 16);

        pool->size = cmd_size * num_cmds;
        pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
                                 GFP_KERNEL);
        if (!pool->mem) {
                kfree(pool->cmds);
                kfree(pool);
                return NULL;
        }

        for (i = 0; i < num_cmds; ++i) {
                struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
                size_t cmd_offset = i * cmd_size;
                /* The data body immediately follows the command descriptor. */
                size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
                                     cmd_offset;

                cmd->pool = pool;
                cmd->opcode = vsp1_extended_commands[type].opcode;

                /*
                 * Each command currently uses a single descriptor followed
                 * by its data body.
                 */
                cmd->num_cmds = 1;
                cmd->cmds = pool->mem + cmd_offset;
                cmd->cmd_dma = pool->dma + cmd_offset;

                cmd->data = pool->mem + data_offset;
                cmd->data_dma = pool->dma + data_offset;

                list_add_tail(&cmd->free, &pool->free);
        }

        return pool;
}

static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
        struct vsp1_dl_ext_cmd *cmd = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);

        if (!list_empty(&pool->free)) {
                cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
                                       free);
                list_del(&cmd->free);
        }

        spin_unlock_irqrestore(&pool->lock, flags);

        return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
        unsigned long flags;

        if (!cmd)
                return;

        /* Reset the flags, they mark data usage. */
        cmd->flags = 0;

        spin_lock_irqsave(&cmd->pool->lock, flags);
        list_add_tail(&cmd->free, &cmd->pool->free);
        spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
        if (!pool)
                return;

        if (pool->mem)
                dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
                            pool->dma);

        kfree(pool->cmds);
        kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        if (dl->pre_cmd)
                return dl->pre_cmd;

        dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

        return dl->pre_cmd;
}

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl;
        size_t header_offset;

        dl = kzalloc(sizeof(*dl), GFP_KERNEL);
        if (!dl)
                return NULL;

        INIT_LIST_HEAD(&dl->bodies);
        dl->dlm = dlm;

        /* Get a default body for our list. */
        dl->body0 = vsp1_dl_body_get(dlm->pool);
        if (!dl->body0) {
                kfree(dl);
                return NULL;
        }

        /* The header lives in the extra space reserved after the entries. */
        header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

        dl->header = ((void *)dl->body0->entries) + header_offset;
        dl->dma = dl->body0->dma + header_offset;

        memset(dl->header, 0, sizeof(*dl->header));
        dl->header->lists[0].addr = dl->body0->dma;

        return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_body *dlb, *tmp;

        list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
                list_del(&dlb->list);
                vsp1_dl_body_put(dlb);
        }
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
        vsp1_dl_body_put(dl->body0);
        vsp1_dl_list_bodies_put(dl);

        kfree(dl);
}

/*
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        if (!list_empty(&dlm->free)) {
                dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
                list_del(&dl->list);

                /*
                 * The display list chain must be initialised to ensure every
                 * display list can assert list_empty() if it is not in a
                 * chain.
                 */
                INIT_LIST_HEAD(&dl->chain);
        }

        spin_unlock_irqrestore(&dlm->lock, flags);

        return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_list *dl_next;

        if (!dl)
                return;

        /*
         * Release any linked display lists that were chained for a single
         * hardware operation.
         */
        if (dl->has_chain) {
                list_for_each_entry(dl_next, &dl->chain, chain)
                        __vsp1_dl_list_put(dl_next);
        }

        dl->has_chain = false;

        vsp1_dl_list_bodies_put(dl);

        vsp1_dl_ext_cmd_put(dl->pre_cmd);
        vsp1_dl_ext_cmd_put(dl->post_cmd);

        dl->pre_cmd = NULL;
        dl->post_cmd = NULL;

        /*
         * body0 stays attached to the list for its next user; only reset its
         * entry count.
         */
        dl->body0->num_entries = 0;

        list_add_tail(&dl->list, &dl->dlm->free);
}

/*
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        unsigned long flags;

        if (!dl)
                return;

        spin_lock_irqsave(&dl->dlm->lock, flags);
        __vsp1_dl_list_put(dl);
        spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/*
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the default display list body, which lets the caller
 * populate the list through vsp1_dl_body_write().
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
        return dl->body0;
}

/*
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies
 * are processed after registers contained in the main display list, in the
 * order in which bodies are added.
 *
 * Adding a body takes an additional reference on it; the caller keeps its own
 * reference but must not add new entries to the body anymore, and must
 * release the reference with vsp1_dl_body_put() when the body isn't needed.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
        refcount_inc(&dlb->refcnt);

        list_add_tail(&dlb->list, &dl->bodies);

        return 0;
}

/*
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU.
 *
 * The head display list acquires ownership of the chained list; releasing the
 * head releases the whole chain.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
                           struct vsp1_dl_list *dl)
{
        head->has_chain = true;
        list_add_tail(&dl->chain, &head->chain);
        return 0;
}

static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
        cmd->cmds[0].opcode = cmd->opcode;
        cmd->cmds[0].flags = cmd->flags;
        cmd->cmds[0].address_set = cmd->data_dma;
        cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_header_list *hdr = dl->header->lists;
        struct vsp1_dl_body *dlb;
        unsigned int num_lists = 0;

        /*
         * Fill the header with the display list bodies addresses and sizes.
         * The address of the first body has already been filled when the
         * display list was allocated.
         */
        hdr->num_bytes = dl->body0->num_entries
                       * sizeof(*dl->header->lists);

        list_for_each_entry(dlb, &dl->bodies, list) {
                num_lists++;
                hdr++;

                hdr->addr = dlb->dma;
                hdr->num_bytes = dlb->num_entries
                               * sizeof(*dl->header->lists);
        }

        dl->header->num_lists = num_lists;
        dl->header->flags = 0;

        /*
         * Enable the interrupt for the end of each frame. In continuous mode
         * chained lists are used with one list per frame, so enable the
         * interrupt for each list. In singleshot mode chained lists are used
         * to partition a single frame, so enable the interrupt for the last
         * list only.
         */
        if (!dlm->singleshot || is_last)
                dl->header->flags |= VSP1_DLH_INT_ENABLE;

        /*
         * In continuous mode enable auto-start for all lists. In singleshot
         * mode enable auto-start for all lists but the last, to chain
         * processing of the partitions without CPU intervention.
         */
        if (!dlm->singleshot || !is_last)
                dl->header->flags |= VSP1_DLH_AUTO_START;

        if (!is_last) {
                /*
                 * Not the last list of the chain: queue the next chained
                 * list for automatic processing by the hardware.
                 */
                struct vsp1_dl_list *next = list_next_entry(dl, chain);

                dl->header->next_header = next->dma;
        } else if (!dlm->singleshot) {
                /*
                 * In continuous mode, the last list points to itself so that
                 * the hardware keeps looping on it until a new list is
                 * queued.
                 */
                dl->header->next_header = dl->dma;
        }

        if (!dl->extension)
                return;

        dl->extension->flags = 0;

        if (dl->pre_cmd) {
                dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
                dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
                dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

                vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
        }

        if (dl->post_cmd) {
                dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
                dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
                dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

                vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
        }
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
        struct vsp1_device *vsp1 = dlm->vsp1;

        if (!dlm->queued)
                return false;

        /*
         * Check whether the hardware has taken the update: VI6_CMD_UPDHDR
         * remains set for as long as the queued display list header has not
         * been read.
         */
        return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_device *vsp1 = dlm->vsp1;

        /*
         * Program the display list header address. If the hardware is idle
         * (single-shot mode or first frame in continuous mode) it will then
         * be started independently. If the hardware is operating, the
         * VI6_DL_HDR_ADDR register will be updated with the display list
         * address.
         */
        vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * If a previous display list has been queued to the hardware but not
         * processed yet, the VSP can start processing it at any time. In
         * that case we can't replace the queued list by the new one, as we
         * could then overwrite the list that the hardware is reading. Put
         * the new display list on the pending slot instead, it will be
         * queued to the hardware by the frame end interrupt handler.
         *
         * A list already pending is simply dropped, as the new list is
         * assumed to carry a more recent configuration; a pending list
         * marked with the internal frame end flag must however not be
         * silently dropped, hence the WARN_ON.
         */
        if (vsp1_dl_list_hw_update_pending(dlm)) {
                WARN_ON(dlm->pending &&
                        (dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
                __vsp1_dl_list_put(dlm->pending);
                dlm->pending = dl;
                return;
        }

        /*
         * Pass the new display list to the hardware and mark it as queued.
         * It will become active when the hardware starts processing it.
         */
        vsp1_dl_list_hw_enqueue(dl);

        __vsp1_dl_list_put(dlm->queued);
        dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * When working in single-shot mode, the caller guarantees that the
         * hardware is idle at this point. Just commit the head display list
         * to hardware.
         */
        vsp1_dl_list_hw_enqueue(dl);

        dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_list *dl_next;
        unsigned long flags;

        /* Fill the header for the head and chained display lists. */
        vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

        list_for_each_entry(dl_next, &dl->chain, chain) {
                bool last = list_is_last(&dl_next->chain, &dl->chain);

                vsp1_dl_list_fill_header(dl_next, last);
        }

        dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

        spin_lock_irqsave(&dlm->lock, flags);

        if (dlm->singleshot)
                vsp1_dl_list_commit_singleshot(dl);
        else
                vsp1_dl_list_commit_continuous(dl);

        spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/*
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that
 * completed at frame end is internal to the display list manager.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously queued
 * writeback of the frame has completed at frame end.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
        struct vsp1_device *vsp1 = dlm->vsp1;
        u32 status = vsp1_read(vsp1, VI6_STATUS);
        unsigned int flags = 0;

        spin_lock(&dlm->lock);

        /*
         * Single-shot pipelines stop at frame end; the active display list
         * has thus completed and can be released.
         */
        if (dlm->singleshot) {
                __vsp1_dl_list_put(dlm->active);
                dlm->active = NULL;
                flags |= VSP1_DL_FRAME_END_COMPLETED;
                goto done;
        }

        /*
         * If the commit raced with the interrupt and occurred after the
         * frame end event but before interrupt processing, the hardware
         * hasn't taken the update into account yet. Skip one frame and
         * retry.
         */
        if (vsp1_dl_list_hw_update_pending(dlm))
                goto done;

        /*
         * A standing field indicates an interlaced stream whose frame will
         * only complete on the next frame end interrupt.
         */
        if (status & VI6_STATUS_FLD_STD(dlm->index))
                goto done;

        /*
         * If the active display list has the writeback flag set, the frame
         * completion marks the end of the writeback capture. Return the
         * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
         * writeback flag.
         */
        if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
                flags |= VSP1_DL_FRAME_END_WRITEBACK;
                dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
        }

        /*
         * The device starts processing the queued display list right after
         * the frame end interrupt. The display list thus becomes active.
         */
        if (dlm->queued) {
                if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
                        flags |= VSP1_DL_FRAME_END_INTERNAL;
                dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

                __vsp1_dl_list_put(dlm->active);
                dlm->active = dlm->queued;
                dlm->queued = NULL;
                flags |= VSP1_DL_FRAME_END_COMPLETED;
        }

        /*
         * Now that the hardware is processing the queued display list, the
         * pending list can be queued to the hardware.
         */
        if (dlm->pending) {
                vsp1_dl_list_hw_enqueue(dlm->pending);
                dlm->queued = dlm->pending;
                dlm->pending = NULL;
        }

done:
        spin_unlock(&dlm->lock);

        return flags;
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
        unsigned int i;
        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
                 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
                 | VI6_DL_CTRL_DLE;
        u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
                   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

        if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
                for (i = 0; i < vsp1->info->wpf_count; ++i)
                        vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
        }

        vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
        vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        __vsp1_dl_list_put(dlm->active);
        __vsp1_dl_list_put(dlm->queued);
        __vsp1_dl_list_put(dlm->pending);

        spin_unlock_irqrestore(&dlm->lock, flags);

        dlm->active = NULL;
        dlm->queued = NULL;
        dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
        return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
                                        unsigned int index,
                                        unsigned int prealloc)
{
        struct vsp1_dl_manager *dlm;
        size_t header_size;
        unsigned int i;

        dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
        if (!dlm)
                return NULL;

        dlm->index = index;
        dlm->singleshot = vsp1->info->uapi;
        dlm->vsp1 = vsp1;

        spin_lock_init(&dlm->lock);
        INIT_LIST_HEAD(&dlm->free);

        /*
         * Initialize the body pool. Each display list header is stored in
         * the extra space reserved at the end of its list's body, keeping
         * body and header in a single allocation. One body more than the
         * number of preallocated lists is created so that a spare body
         * remains available from the pool.
         */
        header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
                        sizeof(struct vsp1_dl_header_extended) :
                        sizeof(struct vsp1_dl_header);

        header_size = ALIGN(header_size, 8);

        dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
                                             VSP1_DL_NUM_ENTRIES, header_size);
        if (!dlm->pool)
                return NULL;

        for (i = 0; i < prealloc; ++i) {
                struct vsp1_dl_list *dl;

                dl = vsp1_dl_list_alloc(dlm);
                if (!dl) {
                        vsp1_dlm_destroy(dlm);
                        return NULL;
                }

                /* The extended header immediately follows the header. */
                if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
                        dl->extension = (void *)dl->header
                                      + sizeof(*dl->header);

                list_add_tail(&dl->list, &dlm->free);
        }

        if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
                dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
                                        VSP1_EXTCMD_AUTOFLD, prealloc);
                if (!dlm->cmdpool) {
                        vsp1_dlm_destroy(dlm);
                        return NULL;
                }
        }

        return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl, *next;

        if (!dlm)
                return;

        list_for_each_entry_safe(dl, next, &dlm->free, list) {
                list_del(&dl->list);
                vsp1_dl_list_free(dl);
        }

        vsp1_dl_body_pool_destroy(dlm->pool);
        vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}