This source file includes following definitions.
- vb2_v4l2_to_csi_buffer
- vb2_to_csi_buffer
- sun4i_csi_capture_start
- sun4i_csi_capture_stop
- sun4i_csi_queue_setup
- sun4i_csi_buffer_prepare
- sun4i_csi_setup_scratch_buffer
- sun4i_csi_buffer_fill_slot
- sun4i_csi_buffer_fill_all
- sun4i_csi_buffer_mark_done
- sun4i_csi_buffer_flip
- sun4i_csi_buffer_queue
- return_all_buffers
- sun4i_csi_start_streaming
- sun4i_csi_stop_streaming
- sun4i_csi_irq
- sun4i_csi_dma_register
- sun4i_csi_dma_unregister
1
2
3
4
5
6
7
8
9 #include <linux/device.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mutex.h>
13 #include <linux/spinlock.h>
14 #include <media/videobuf2-dma-contig.h>
15 #include <media/videobuf2-v4l2.h>
16
17 #include "sun4i_csi.h"
18
/*
 * struct sun4i_csi_buffer - driver wrapper around a videobuf2 buffer
 * @vb:   the vb2 V4L2 buffer; must stay the first member so the
 *        container_of() conversions below are valid
 * @list: entry in the driver's pending-buffer list (csi->buf_list)
 */
struct sun4i_csi_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};
23
/* Convert a vb2_v4l2_buffer pointer back to its enclosing sun4i_csi_buffer. */
static inline struct sun4i_csi_buffer *
vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
{
	return container_of(p, struct sun4i_csi_buffer, vb);
}
29
/* Convert a generic vb2_buffer pointer to its enclosing sun4i_csi_buffer. */
static inline struct sun4i_csi_buffer *
vb2_to_csi_buffer(const struct vb2_buffer *p)
{
	return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
}
35
/* Start hardware video capture by setting the start bit in the capture
 * control register.
 */
static void sun4i_csi_capture_start(struct sun4i_csi *csi)
{
	writel(CSI_CPT_CTRL_VIDEO_START, csi->regs + CSI_CPT_CTRL_REG);
}
40
/* Stop hardware video capture by clearing the capture control register. */
static void sun4i_csi_capture_stop(struct sun4i_csi *csi)
{
	writel(0, csi->regs + CSI_CPT_CTRL_REG);
}
45
46 static int sun4i_csi_queue_setup(struct vb2_queue *vq,
47 unsigned int *nbuffers,
48 unsigned int *nplanes,
49 unsigned int sizes[],
50 struct device *alloc_devs[])
51 {
52 struct sun4i_csi *csi = vb2_get_drv_priv(vq);
53 unsigned int num_planes = csi->fmt.num_planes;
54 unsigned int i;
55
56 if (*nplanes) {
57 if (*nplanes != num_planes)
58 return -EINVAL;
59
60 for (i = 0; i < num_planes; i++)
61 if (sizes[i] < csi->fmt.plane_fmt[i].sizeimage)
62 return -EINVAL;
63 return 0;
64 }
65
66 *nplanes = num_planes;
67 for (i = 0; i < num_planes; i++)
68 sizes[i] = csi->fmt.plane_fmt[i].sizeimage;
69
70 return 0;
71 };
72
73 static int sun4i_csi_buffer_prepare(struct vb2_buffer *vb)
74 {
75 struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
76 unsigned int i;
77
78 for (i = 0; i < csi->fmt.num_planes; i++) {
79 unsigned long size = csi->fmt.plane_fmt[i].sizeimage;
80
81 if (vb2_plane_size(vb, i) < size) {
82 dev_err(csi->dev, "buffer too small (%lu < %lu)\n",
83 vb2_plane_size(vb, i), size);
84 return -EINVAL;
85 }
86
87 vb2_set_plane_payload(vb, i, size);
88 }
89
90 return 0;
91 }
92
93 static int sun4i_csi_setup_scratch_buffer(struct sun4i_csi *csi,
94 unsigned int slot)
95 {
96 dma_addr_t addr = csi->scratch.paddr;
97 unsigned int plane;
98
99 dev_dbg(csi->dev,
100 "No more available buffer, using the scratch buffer\n");
101
102 for (plane = 0; plane < csi->fmt.num_planes; plane++) {
103 writel(addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
104 addr += csi->fmt.plane_fmt[plane].sizeimage;
105 }
106
107 csi->current_buf[slot] = NULL;
108 return 0;
109 }
110
111 static int sun4i_csi_buffer_fill_slot(struct sun4i_csi *csi, unsigned int slot)
112 {
113 struct sun4i_csi_buffer *c_buf;
114 struct vb2_v4l2_buffer *v_buf;
115 unsigned int plane;
116
117
118
119
120
121 if (WARN_ON(csi->current_buf[slot]))
122 return -EINVAL;
123
124 if (list_empty(&csi->buf_list))
125 return sun4i_csi_setup_scratch_buffer(csi, slot);
126
127 c_buf = list_first_entry(&csi->buf_list, struct sun4i_csi_buffer, list);
128 list_del_init(&c_buf->list);
129
130 v_buf = &c_buf->vb;
131 csi->current_buf[slot] = v_buf;
132
133 for (plane = 0; plane < csi->fmt.num_planes; plane++) {
134 dma_addr_t buf_addr;
135
136 buf_addr = vb2_dma_contig_plane_dma_addr(&v_buf->vb2_buf,
137 plane);
138 writel(buf_addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
139 }
140
141 return 0;
142 }
143
144 static int sun4i_csi_buffer_fill_all(struct sun4i_csi *csi)
145 {
146 unsigned int slot;
147 int ret;
148
149 for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
150 ret = sun4i_csi_buffer_fill_slot(csi, slot);
151 if (ret)
152 return ret;
153 }
154
155 return 0;
156 }
157
158 static void sun4i_csi_buffer_mark_done(struct sun4i_csi *csi,
159 unsigned int slot,
160 unsigned int sequence)
161 {
162 struct vb2_v4l2_buffer *v_buf;
163
164 if (!csi->current_buf[slot]) {
165 dev_dbg(csi->dev, "Scratch buffer was used, ignoring..\n");
166 return;
167 }
168
169 v_buf = csi->current_buf[slot];
170 v_buf->field = csi->fmt.field;
171 v_buf->sequence = sequence;
172 v_buf->vb2_buf.timestamp = ktime_get_ns();
173 vb2_buffer_done(&v_buf->vb2_buf, VB2_BUF_STATE_DONE);
174
175 csi->current_buf[slot] = NULL;
176 }
177
178 static int sun4i_csi_buffer_flip(struct sun4i_csi *csi, unsigned int sequence)
179 {
180 u32 reg = readl(csi->regs + CSI_BUF_CTRL_REG);
181 unsigned int next;
182
183
184 next = !(reg & CSI_BUF_CTRL_DBS);
185
186
187 sun4i_csi_buffer_mark_done(csi, next, sequence);
188
189
190 return sun4i_csi_buffer_fill_slot(csi, next);
191 }
192
193 static void sun4i_csi_buffer_queue(struct vb2_buffer *vb)
194 {
195 struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
196 struct sun4i_csi_buffer *buf = vb2_to_csi_buffer(vb);
197 unsigned long flags;
198
199 spin_lock_irqsave(&csi->qlock, flags);
200 list_add_tail(&buf->list, &csi->buf_list);
201 spin_unlock_irqrestore(&csi->qlock, flags);
202 }
203
204 static void return_all_buffers(struct sun4i_csi *csi,
205 enum vb2_buffer_state state)
206 {
207 struct sun4i_csi_buffer *buf, *node;
208 unsigned int slot;
209
210 list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
211 vb2_buffer_done(&buf->vb.vb2_buf, state);
212 list_del(&buf->list);
213 }
214
215 for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
216 struct vb2_v4l2_buffer *v_buf = csi->current_buf[slot];
217
218 if (!v_buf)
219 continue;
220
221 vb2_buffer_done(&v_buf->vb2_buf, state);
222 csi->current_buf[slot] = NULL;
223 }
224 }
225
226 static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
227 {
228 struct sun4i_csi *csi = vb2_get_drv_priv(vq);
229 struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
230 const struct sun4i_csi_format *csi_fmt;
231 unsigned long href_pol, pclk_pol, vref_pol;
232 unsigned long flags;
233 unsigned int i;
234 int ret;
235
236 csi_fmt = sun4i_csi_find_format(&csi->fmt.pixelformat, NULL);
237 if (!csi_fmt)
238 return -EINVAL;
239
240 dev_dbg(csi->dev, "Starting capture\n");
241
242 csi->sequence = 0;
243
244
245
246
247
248
249
250
251
252
253
254
255 csi->scratch.size = 0;
256 for (i = 0; i < csi->fmt.num_planes; i++)
257 csi->scratch.size += csi->fmt.plane_fmt[i].sizeimage;
258
259 csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
260 csi->scratch.size,
261 &csi->scratch.paddr,
262 GFP_KERNEL);
263 if (!csi->scratch.vaddr) {
264 dev_err(csi->dev, "Failed to allocate scratch buffer\n");
265 ret = -ENOMEM;
266 goto err_clear_dma_queue;
267 }
268
269 ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
270 if (ret < 0)
271 goto err_free_scratch_buffer;
272
273 spin_lock_irqsave(&csi->qlock, flags);
274
275
276 writel(CSI_WIN_CTRL_W_ACTIVE(csi->fmt.width * 2),
277 csi->regs + CSI_WIN_CTRL_W_REG);
278 writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
279 csi->regs + CSI_WIN_CTRL_H_REG);
280
281
282
283
284
285
286
287
288
289 href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
290 vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
291 pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
292 writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
293 CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
294 CSI_CFG_VREF_POL(vref_pol) |
295 CSI_CFG_HREF_POL(href_pol) |
296 CSI_CFG_PCLK_POL(pclk_pol),
297 csi->regs + CSI_CFG_REG);
298
299
300 writel(csi->fmt.plane_fmt[0].bytesperline,
301 csi->regs + CSI_BUF_LEN_REG);
302
303
304 ret = sun4i_csi_buffer_fill_all(csi);
305 if (ret) {
306 spin_unlock_irqrestore(&csi->qlock, flags);
307 goto err_disable_pipeline;
308 }
309
310
311 writel(CSI_BUF_CTRL_DBE, csi->regs + CSI_BUF_CTRL_REG);
312
313
314 writel(CSI_INT_FRM_DONE, csi->regs + 0x34);
315
316
317 writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_EN_REG);
318
319 sun4i_csi_capture_start(csi);
320
321 spin_unlock_irqrestore(&csi->qlock, flags);
322
323 ret = v4l2_subdev_call(csi->src_subdev, video, s_stream, 1);
324 if (ret < 0 && ret != -ENOIOCTLCMD)
325 goto err_disable_device;
326
327 return 0;
328
329 err_disable_device:
330 sun4i_csi_capture_stop(csi);
331
332 err_disable_pipeline:
333 media_pipeline_stop(&csi->vdev.entity);
334
335 err_free_scratch_buffer:
336 dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
337 csi->scratch.paddr);
338
339 err_clear_dma_queue:
340 spin_lock_irqsave(&csi->qlock, flags);
341 return_all_buffers(csi, VB2_BUF_STATE_QUEUED);
342 spin_unlock_irqrestore(&csi->qlock, flags);
343
344 return ret;
345 }
346
/*
 * sun4i_csi_stop_streaming() - vb2 .stop_streaming callback
 *
 * Teardown order matters here: stop the sensor stream, then the capture
 * engine, then return all buffers as errored, and only then stop the media
 * pipeline and free the scratch buffer (the hardware must no longer be
 * writing before the scratch DMA memory goes away).
 */
static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
	unsigned long flags;

	dev_dbg(csi->dev, "Stopping capture\n");

	v4l2_subdev_call(csi->src_subdev, video, s_stream, 0);
	sun4i_csi_capture_stop(csi);

	/* Release all active buffers, flagging them as errored. */
	spin_lock_irqsave(&csi->qlock, flags);
	return_all_buffers(csi, VB2_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&csi->qlock, flags);

	media_pipeline_stop(&csi->vdev.entity);

	dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
			  csi->scratch.paddr);
}
367
/* videobuf2 queue operations for the CSI capture queue. */
static const struct vb2_ops sun4i_csi_qops = {
	.queue_setup = sun4i_csi_queue_setup,
	.buf_prepare = sun4i_csi_buffer_prepare,
	.buf_queue = sun4i_csi_buffer_queue,
	.start_streaming = sun4i_csi_start_streaming,
	.stop_streaming = sun4i_csi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
377
/*
 * sun4i_csi_irq() - CSI interrupt handler
 *
 * Acknowledges all pending interrupts and, on a frame-done event, flips
 * the double-buffering slots (completes the finished buffer, reloads the
 * slot). If reloading the slot fails, capture is stopped.
 */
static irqreturn_t sun4i_csi_irq(int irq, void *data)
{
	struct sun4i_csi *csi = data;
	u32 reg;

	reg = readl(csi->regs + CSI_INT_STA_REG);

	/*
	 * Acknowledge the interrupts by writing the status back --
	 * presumably a write-1-to-clear register; confirm with datasheet.
	 */
	writel(reg, csi->regs + CSI_INT_STA_REG);

	if (!(reg & CSI_INT_FRM_DONE))
		return IRQ_HANDLED;

	spin_lock(&csi->qlock);
	if (sun4i_csi_buffer_flip(csi, csi->sequence++)) {
		dev_warn(csi->dev, "%s: Flip failed\n", __func__);
		sun4i_csi_capture_stop(csi);
	}
	spin_unlock(&csi->qlock);

	return IRQ_HANDLED;
}
400
401 int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
402 {
403 struct vb2_queue *q = &csi->queue;
404 int ret;
405 int i;
406
407 spin_lock_init(&csi->qlock);
408 mutex_init(&csi->lock);
409
410 INIT_LIST_HEAD(&csi->buf_list);
411 for (i = 0; i < CSI_MAX_BUFFER; i++)
412 csi->current_buf[i] = NULL;
413
414 q->min_buffers_needed = 3;
415 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
416 q->io_modes = VB2_MMAP;
417 q->lock = &csi->lock;
418 q->drv_priv = csi;
419 q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
420 q->ops = &sun4i_csi_qops;
421 q->mem_ops = &vb2_dma_contig_memops;
422 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
423 q->dev = csi->dev;
424
425 ret = vb2_queue_init(q);
426 if (ret < 0) {
427 dev_err(csi->dev, "failed to initialize VB2 queue\n");
428 goto err_free_mutex;
429 }
430
431 ret = v4l2_device_register(csi->dev, &csi->v4l);
432 if (ret) {
433 dev_err(csi->dev, "Couldn't register the v4l2 device\n");
434 goto err_free_queue;
435 }
436
437 ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
438 dev_name(csi->dev), csi);
439 if (ret) {
440 dev_err(csi->dev, "Couldn't register our interrupt\n");
441 goto err_unregister_device;
442 }
443
444 return 0;
445
446 err_unregister_device:
447 v4l2_device_unregister(&csi->v4l);
448
449 err_free_queue:
450 vb2_queue_release(q);
451
452 err_free_mutex:
453 mutex_destroy(&csi->lock);
454 return ret;
455 }
456
/*
 * sun4i_csi_dma_unregister() - tear down what sun4i_csi_dma_register() set up
 *
 * Unregisters the v4l2 device, releases the vb2 queue and destroys the
 * serialization mutex. The IRQ was requested with devm_request_irq() and
 * is released automatically by the device core.
 */
void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
{
	v4l2_device_unregister(&csi->v4l);
	vb2_queue_release(&csi->queue);
	mutex_destroy(&csi->lock);
}