This source file includes the following definitions:
- iio_buffer_to_dmaengine_buffer
- iio_dmaengine_buffer_block_done
- iio_dmaengine_buffer_submit_block
- iio_dmaengine_buffer_abort
- iio_dmaengine_buffer_release
- iio_dmaengine_buffer_alloc
- iio_dmaengine_buffer_free
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

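/*
 * struct dmaengine_buffer - IIO DMA buffer backed by the DMAengine framework
 * @queue: Generic IIO DMA buffer queue
 * @chan: DMAengine channel used for the transfers
 * @active: List of blocks that have been submitted to the DMA channel
 * @align: Alignment requirement for the transfer size, in bytes
 * @max_size: Maximum size of a single transfer, in bytes
 *
 * Each block handed out by the generic DMA buffer infrastructure is submitted
 * as one slave DMA transfer and returned to the core from the transfer's
 * completion callback.
 */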
struct dmaengine_buffer {
        struct iio_dma_buffer_queue queue;

        struct dma_chan *chan;
        struct list_head active;

        size_t align;
        size_t max_size;
};

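/* Translate a struct iio_buffer back to its enclosing dmaengine_buffer. */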
static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
        struct iio_buffer *buffer)
{
        return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

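/*
 * DMA completion callback: take the block off the active list and hand it
 * back to the generic DMA buffer core.
 */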
static void iio_dmaengine_buffer_block_done(void *data)
{
        struct iio_dma_buffer_block *block = data;
        unsigned long flags;

        spin_lock_irqsave(&block->queue->list_lock, flags);
        list_del(&block->head);
        spin_unlock_irqrestore(&block->queue->list_lock, flags);
        iio_dma_buffer_block_done(block);
}

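/*
 * Submit one block to the DMA channel: clamp the transfer to the channel's
 * maximum segment size and alignment, prepare a single slave transfer, attach
 * the completion callback, queue the block on the active list and kick the
 * channel.
 */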
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        block->bytes_used = min(block->size, dmaengine_buffer->max_size);
        block->bytes_used = rounddown(block->bytes_used,
                dmaengine_buffer->align);

        desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
                block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        desc->callback = iio_dmaengine_buffer_block_done;
        desc->callback_param = block;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return dma_submit_error(cookie);

        spin_lock_irq(&dmaengine_buffer->queue.list_lock);
        list_add_tail(&block->head, &dmaengine_buffer->active);
        spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

        dma_async_issue_pending(dmaengine_buffer->chan);

        return 0;
}

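/* Stop the DMA channel and abort all blocks that are still in flight. */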
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);

        dmaengine_terminate_sync(dmaengine_buffer->chan);
        iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

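/* Release the DMA buffer queue resources and free the buffer itself. */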
static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buf);

        iio_dma_buffer_release(&dmaengine_buffer->queue);
        kfree(dmaengine_buffer);
}

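/*
 * Buffer access functions: everything except release is handled by the
 * generic IIO DMA buffer implementation.
 */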
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
        .read_first_n = iio_dma_buffer_read,
        .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
        .set_length = iio_dma_buffer_set_length,
        .request_update = iio_dma_buffer_request_update,
        .enable = iio_dma_buffer_enable,
        .disable = iio_dma_buffer_disable,
        .data_available = iio_dma_buffer_data_available,
        .release = iio_dmaengine_buffer_release,

        .modes = INDIO_BUFFER_HARDWARE,
        .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

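/* Queue operations used by the generic DMA buffer core for this backend. */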
static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
        .submit = iio_dmaengine_buffer_submit_block,
        .abort = iio_dmaengine_buffer_abort,
};

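/*
 * iio_dmaengine_buffer_alloc() - Allocate an IIO buffer backed by DMAengine
 * @dev: Device used to request the DMA channel
 * @channel: Name of the DMA channel to request
 *
 * Requests the DMA channel, derives the transfer alignment and maximum
 * transfer size from the channel's capabilities, and initializes the generic
 * DMA buffer queue on top of it. Returns the new buffer or an ERR_PTR() on
 * failure. Release the buffer with iio_dmaengine_buffer_free().
 *
 * A minimal usage sketch (the surrounding driver code and the "rx" channel
 * name are illustrative, not part of this file):
 *
 *      buffer = iio_dmaengine_buffer_alloc(indio_dev->dev.parent, "rx");
 *      if (IS_ERR(buffer))
 *              return PTR_ERR(buffer);
 *      indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *      iio_device_attach_buffer(indio_dev, buffer);
 */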
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
        const char *channel)
{
        struct dmaengine_buffer *dmaengine_buffer;
        unsigned int width, src_width, dest_width;
        struct dma_slave_caps caps;
        struct dma_chan *chan;
        int ret;

        dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
        if (!dmaengine_buffer)
                return ERR_PTR(-ENOMEM);

        chan = dma_request_slave_channel_reason(dev, channel);
        if (IS_ERR(chan)) {
                ret = PTR_ERR(chan);
                goto err_free;
        }

        ret = dma_get_slave_caps(chan, &caps);
        if (ret < 0)
                goto err_release;

        /* Align to the larger of the two minimum supported address widths. */
        if (caps.src_addr_widths)
                src_width = __ffs(caps.src_addr_widths);
        else
                src_width = 1;
        if (caps.dst_addr_widths)
                dest_width = __ffs(caps.dst_addr_widths);
        else
                dest_width = 1;
        width = max(src_width, dest_width);

        INIT_LIST_HEAD(&dmaengine_buffer->active);
        dmaengine_buffer->chan = chan;
        dmaengine_buffer->align = width;
        dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

        iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
                &iio_dmaengine_default_ops);

        dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

        return &dmaengine_buffer->queue.buffer;

err_release:
        dma_release_channel(chan);
err_free:
        kfree(dmaengine_buffer);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);

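/*
 * iio_dmaengine_buffer_free() - Free a buffer allocated with
 * iio_dmaengine_buffer_alloc()
 * @buffer: Buffer to free
 *
 * Tears down the DMA buffer queue, releases the DMA channel and puts the
 * buffer reference.
 */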
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);

        iio_dma_buffer_exit(&dmaengine_buffer->queue);
        dma_release_channel(dmaengine_buffer->chan);

        iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);