This source file includes the following definitions:
- to_virt_desc
- vchan_tx_submit
- vchan_tx_desc_free
- vchan_find_desc
- vchan_complete
- vchan_dma_desc_free_list
- vchan_init
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
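
/*
 * Usage sketch: drivers do not call vchan_tx_submit() directly;
 * vchan_tx_prep() in virt-dma.h installs it as the descriptor's
 * tx_submit hook, so a client's dmaengine_submit() lands here. The
 * foo_* names below are hypothetical, assuming struct foo_desc embeds
 * a struct virt_dma_desc named vd:
 *
 *	static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
 *		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 *		size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		foo_fill_hw_desc(d, dst, src, len);	// hypothetical helper
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 */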

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
 * descriptor.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
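
/*
 * Usage sketch: vchan_tx_prep() installs this function as vd->tx.desc_free,
 * so a client that marked a descriptor reusable releases it through the
 * generic dmaengine helpers rather than by calling it here directly:
 *
 *	dmaengine_desc_set_reuse(tx);
 *	dmaengine_submit(tx);
 *	...				// wait for the transfer to complete
 *	dmaengine_desc_free(tx);	// dispatches to vchan_tx_desc_free()
 */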

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
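
/*
 * Usage sketch: vchan_find_desc() is typically called from a driver's
 * device_tx_status callback, under vc->lock, to report the residue of a
 * descriptor that has been issued but not yet started; foo_desc_size()
 * is a hypothetical helper:
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	vd = vchan_find_desc(vc, cookie);
 *	if (vd)
 *		dma_set_residue(txstate, foo_desc_size(vd));
 *	spin_unlock_irqrestore(&vc->lock, flags);
 */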

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/*
	 * If there was no cyclic descriptor, cb was zeroed above, so the
	 * invoke below is a no-op and the result pointer is never
	 * dereferenced.
	 */
	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}
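
/*
 * Usage sketch: this tasklet runs after a driver's interrupt handler has
 * completed a descriptor via vchan_cookie_complete() (virt-dma.h), which
 * moves the descriptor to desc_completed and schedules vc->task;
 * c->cur_vd is hypothetical per-channel driver state:
 *
 *	spin_lock(&vc->lock);
 *	vd = c->cur_vd;
 *	c->cur_vd = NULL;
 *	if (vd)
 *		vchan_cookie_complete(vd);
 *	spin_unlock(&vc->lock);
 */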

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
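
/*
 * Usage sketch: a driver's device_terminate_all typically stops the
 * hardware, collects every pending descriptor with
 * vchan_get_all_descriptors() (virt-dma.h), and frees the list outside
 * the lock; foo_stop_hardware() is hypothetical:
 *
 *	LIST_HEAD(head);
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	foo_stop_hardware(c);
 *	vchan_get_all_descriptors(vc, &head);
 *	spin_unlock_irqrestore(&vc->lock, flags);
 *	vchan_dma_desc_free_list(vc, &head);
 */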

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
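
/*
 * Usage sketch: a driver's probe path sets desc_free on each channel and
 * calls vchan_init() before registering the dma_device; the foo_* names
 * are hypothetical:
 *
 *	for (i = 0; i < fdev->nr_channels; i++) {
 *		struct foo_chan *c = &fdev->chans[i];
 *
 *		c->vc.desc_free = foo_desc_free;
 *		vchan_init(&c->vc, &fdev->ddev);
 *	}
 *	ret = dma_async_device_register(&fdev->ddev);
 */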

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");