This source file includes the following definitions:
- fourcc_to_str
- find_format_by_pix
- find_format_by_code
- notifier_to_ctx
- get_field
- set_field
- cm_create
- camerarx_phy_enable
- camerarx_phy_disable
- cc_create
- cal_get_hwinfo
- cal_runtime_get
- cal_runtime_put
- cal_quickdump_regs
- enable_irqs
- disable_irqs
- csi2_init
- csi2_lane_config
- csi2_ppi_enable
- csi2_ppi_disable
- csi2_ctx_config
- pix_proc_config
- cal_wr_dma_config
- cal_wr_dma_addr
- csi2_phy_config
- cal_get_external_info
- cal_schedule_next_buffer
- cal_process_buffer_complete
- cal_irq
- cal_querycap
- cal_enum_fmt_vid_cap
- __subdev_get_format
- __subdev_set_format
- cal_calc_format_size
- cal_g_fmt_vid_cap
- cal_try_fmt_vid_cap
- cal_s_fmt_vid_cap
- cal_enum_framesizes
- cal_enum_input
- cal_g_input
- cal_s_input
- cal_enum_frameintervals
- cal_queue_setup
- cal_buffer_prepare
- cal_buffer_queue
- cal_start_streaming
- cal_stop_streaming
- cal_async_bound
- cal_async_complete
- cal_complete_ctx
- of_get_next_port
- of_get_next_endpoint
- of_cal_create_instance
- cal_create_instance
- cal_probe
- cal_remove
1
2
3
4
5
6
7
8
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/ioctl.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/slab.h>
17 #include <linux/videodev2.h>
18 #include <linux/of_device.h>
19 #include <linux/of_graph.h>
20
21 #include <media/v4l2-fwnode.h>
22 #include <media/v4l2-async.h>
23 #include <media/v4l2-common.h>
24 #include <media/v4l2-ctrls.h>
25 #include <media/v4l2-device.h>
26 #include <media/v4l2-event.h>
27 #include <media/v4l2-ioctl.h>
28 #include <media/v4l2-fh.h>
29 #include <media/videobuf2-core.h>
30 #include <media/videobuf2-dma-contig.h>
31 #include "cal_regs.h"
32
33 #define CAL_MODULE_NAME "cal"
34
35 #define MAX_WIDTH 1920
36 #define MAX_HEIGHT 1200
37
38 #define CAL_VERSION "0.1.0"
39
40 MODULE_DESCRIPTION("TI CAL driver");
41 MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
42 MODULE_LICENSE("GPL v2");
43 MODULE_VERSION(CAL_VERSION);
44
45 static unsigned video_nr = -1;
46 module_param(video_nr, uint, 0644);
47 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
48
49 static unsigned debug;
50 module_param(debug, uint, 0644);
51 MODULE_PARM_DESC(debug, "activates debug info");
52
53
54 static const struct v4l2_fract
55 tpf_default = {.numerator = 1001, .denominator = 30000};
56
57 #define cal_dbg(level, caldev, fmt, arg...) \
58 v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
59 #define cal_info(caldev, fmt, arg...) \
60 v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
61 #define cal_err(caldev, fmt, arg...) \
62 v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
63
64 #define ctx_dbg(level, ctx, fmt, arg...) \
65 v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
66 #define ctx_info(ctx, fmt, arg...) \
67 v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
68 #define ctx_err(ctx, fmt, arg...) \
69 v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
70
71 #define CAL_NUM_INPUT 1
72 #define CAL_NUM_CONTEXT 2
73
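/*
 * Line stride in bytes, rounded up to a 16-byte boundary. Callers pass the
 * per-pixel byte count (fmt->depth >> 3), e.g. 1280 pixels at 2 bytes/pixel
 * -> ALIGN(2560, 16) = 2560.
 */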
74 #define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16))
75
76 #define reg_read(dev, offset) ioread32(dev->base + offset)
77 #define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
78
79 #define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
80 mask)
81 #define reg_write_field(dev, offset, field, mask) { \
82 u32 val = reg_read(dev, offset); \
83 set_field(&val, field, mask); \
84 reg_write(dev, offset, val); }
85
86
87
88
89
90
91 struct cal_fmt {
92 u32 fourcc;
93 u32 code;
94 u8 depth;
95 };
96
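/*
 * Formats the CAL write DMA can hand to userspace: V4L2 fourcc, the matching
 * media bus code, and bits per pixel (used for the stride calculation). The
 * per-context subset actually advertised is built in cal_async_bound() from
 * what the bound sensor enumerates.
 */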
97 static struct cal_fmt cal_formats[] = {
98 {
99 .fourcc = V4L2_PIX_FMT_YUYV,
100 .code = MEDIA_BUS_FMT_YUYV8_2X8,
101 .depth = 16,
102 }, {
103 .fourcc = V4L2_PIX_FMT_UYVY,
104 .code = MEDIA_BUS_FMT_UYVY8_2X8,
105 .depth = 16,
106 }, {
107 .fourcc = V4L2_PIX_FMT_YVYU,
108 .code = MEDIA_BUS_FMT_YVYU8_2X8,
109 .depth = 16,
110 }, {
111 .fourcc = V4L2_PIX_FMT_VYUY,
112 .code = MEDIA_BUS_FMT_VYUY8_2X8,
113 .depth = 16,
114 }, {
115 .fourcc = V4L2_PIX_FMT_RGB565,
116 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
117 .depth = 16,
118 }, {
119 .fourcc = V4L2_PIX_FMT_RGB565X,
120 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
121 .depth = 16,
122 }, {
123 .fourcc = V4L2_PIX_FMT_RGB555,
124 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
125 .depth = 16,
126 }, {
127 .fourcc = V4L2_PIX_FMT_RGB555X,
128 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
129 .depth = 16,
130 }, {
131 .fourcc = V4L2_PIX_FMT_RGB24,
132 .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
133 .depth = 24,
134 }, {
135 .fourcc = V4L2_PIX_FMT_BGR24,
136 .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
137 .depth = 24,
138 }, {
139 .fourcc = V4L2_PIX_FMT_RGB32,
140 .code = MEDIA_BUS_FMT_ARGB8888_1X32,
141 .depth = 32,
142 }, {
143 .fourcc = V4L2_PIX_FMT_SBGGR8,
144 .code = MEDIA_BUS_FMT_SBGGR8_1X8,
145 .depth = 8,
146 }, {
147 .fourcc = V4L2_PIX_FMT_SGBRG8,
148 .code = MEDIA_BUS_FMT_SGBRG8_1X8,
149 .depth = 8,
150 }, {
151 .fourcc = V4L2_PIX_FMT_SGRBG8,
152 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
153 .depth = 8,
154 }, {
155 .fourcc = V4L2_PIX_FMT_SRGGB8,
156 .code = MEDIA_BUS_FMT_SRGGB8_1X8,
157 .depth = 8,
158 }, {
159 .fourcc = V4L2_PIX_FMT_SBGGR10,
160 .code = MEDIA_BUS_FMT_SBGGR10_1X10,
161 .depth = 16,
162 }, {
163 .fourcc = V4L2_PIX_FMT_SGBRG10,
164 .code = MEDIA_BUS_FMT_SGBRG10_1X10,
165 .depth = 16,
166 }, {
167 .fourcc = V4L2_PIX_FMT_SGRBG10,
168 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
169 .depth = 16,
170 }, {
171 .fourcc = V4L2_PIX_FMT_SRGGB10,
172 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
173 .depth = 16,
174 }, {
175 .fourcc = V4L2_PIX_FMT_SBGGR12,
176 .code = MEDIA_BUS_FMT_SBGGR12_1X12,
177 .depth = 16,
178 }, {
179 .fourcc = V4L2_PIX_FMT_SGBRG12,
180 .code = MEDIA_BUS_FMT_SGBRG12_1X12,
181 .depth = 16,
182 }, {
183 .fourcc = V4L2_PIX_FMT_SGRBG12,
184 .code = MEDIA_BUS_FMT_SGRBG12_1X12,
185 .depth = 16,
186 }, {
187 .fourcc = V4L2_PIX_FMT_SRGGB12,
188 .code = MEDIA_BUS_FMT_SRGGB12_1X12,
189 .depth = 16,
190 },
191 };
192
193
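/*
 * Debug helper: render a fourcc as a printable 4-character string. Returns a
 * static buffer, so it is not reentrant; only used in debug/log paths.
 */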
194 static char *fourcc_to_str(u32 fmt)
195 {
196 static char code[5];
197
198 code[0] = (unsigned char)(fmt & 0xff);
199 code[1] = (unsigned char)((fmt >> 8) & 0xff);
200 code[2] = (unsigned char)((fmt >> 16) & 0xff);
201 code[3] = (unsigned char)((fmt >> 24) & 0xff);
202 code[4] = '\0';
203
204 return code;
205 }
206
207
208 struct cal_buffer {
209
210 struct vb2_v4l2_buffer vb;
211 struct list_head list;
212 const struct cal_fmt *fmt;
213 };
214
215 struct cal_dmaqueue {
216 struct list_head active;
217
218
219 int frame;
220 int ini_jiffies;
221 };
222
223 struct cm_data {
224 void __iomem *base;
225 struct resource *res;
226
227 unsigned int camerrx_control;
228
229 struct platform_device *pdev;
230 };
231
232 struct cc_data {
233 void __iomem *base;
234 struct resource *res;
235
236 struct platform_device *pdev;
237 };
238
239
240
241
242
243 struct cal_dev {
244 int irq;
245 void __iomem *base;
246 struct resource *res;
247 struct platform_device *pdev;
248 struct v4l2_device v4l2_dev;
249
250
251 struct cm_data *cm;
252
253 struct cc_data *cc[CAL_NUM_CSI2_PORTS];
254
255 struct cal_ctx *ctx[CAL_NUM_CONTEXT];
256 };
257
258
259
260
261 struct cal_ctx {
262 struct v4l2_device v4l2_dev;
263 struct v4l2_ctrl_handler ctrl_handler;
264 struct video_device vdev;
265 struct v4l2_async_notifier notifier;
266 struct v4l2_subdev *sensor;
267 struct v4l2_fwnode_endpoint endpoint;
268
269 struct v4l2_fh fh;
270 struct cal_dev *dev;
271 struct cc_data *cc;
272
273
274 struct mutex mutex;
275
276 spinlock_t slock;
277
278
279 unsigned long jiffies;
280
281 struct cal_dmaqueue vidq;
282
283
284 int input;
285
286
287 const struct cal_fmt *fmt;
288
289 struct v4l2_format v_fmt;
290
291 struct v4l2_mbus_framefmt m_fmt;
292
293
294 struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)];
295 int num_active_fmt;
296
297 struct v4l2_fract timeperframe;
298 unsigned int sequence;
299 unsigned int external_rate;
300 struct vb2_queue vb_vidq;
301 unsigned int seq_count;
302 unsigned int csi2_port;
303 unsigned int virtual_channel;
304
305
306 struct cal_buffer *cur_frm;
307
308 struct cal_buffer *next_frm;
309 };
310
311 static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
312 u32 pixelformat)
313 {
314 const struct cal_fmt *fmt;
315 unsigned int k;
316
317 for (k = 0; k < ctx->num_active_fmt; k++) {
318 fmt = ctx->active_fmt[k];
319 if (fmt->fourcc == pixelformat)
320 return fmt;
321 }
322
323 return NULL;
324 }
325
326 static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
327 u32 code)
328 {
329 const struct cal_fmt *fmt;
330 unsigned int k;
331
332 for (k = 0; k < ctx->num_active_fmt; k++) {
333 fmt = ctx->active_fmt[k];
334 if (fmt->code == code)
335 return fmt;
336 }
337
338 return NULL;
339 }
340
341 static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
342 {
343 return container_of(n, struct cal_ctx, notifier);
344 }
345
346 static inline int get_field(u32 value, u32 mask)
347 {
348 return (value & mask) >> __ffs(mask);
349 }
350
351 static inline void set_field(u32 *valp, u32 field, u32 mask)
352 {
353 u32 val = *valp;
354
355 val &= ~mask;
356 val |= (field << __ffs(mask)) & mask;
357 *valp = val;
358 }
359
360
361
362
363 static struct cm_data *cm_create(struct cal_dev *dev)
364 {
365 struct platform_device *pdev = dev->pdev;
366 struct cm_data *cm;
367
368 cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
369 if (!cm)
370 return ERR_PTR(-ENOMEM);
371
372 cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
373 "camerrx_control");
374 cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
375 if (IS_ERR(cm->base)) {
376 cal_err(dev, "failed to ioremap\n");
377 return ERR_CAST(cm->base);
378 }
379
380 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
381 cm->res->name, &cm->res->start, &cm->res->end);
382
383 return cm;
384 }
385
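/*
 * Program CTRL_CORE_CAMERRX_CONTROL for this context's CSI-2 port: enable the
 * PHY control clock, clear the CAMMODE field, enable the lane set (four lanes
 * on CSI0/port 1, two on CSI1/port 2) and set the MODE field.
 */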
386 static void camerarx_phy_enable(struct cal_ctx *ctx)
387 {
388 u32 val;
389
390 if (!ctx->dev->cm->base) {
391 ctx_err(ctx, "cm not mapped\n");
392 return;
393 }
394
395 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
396 if (ctx->csi2_port == 1) {
397 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
398 set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
399
400 set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
401 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
402 } else if (ctx->csi2_port == 2) {
403 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
404 set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
405
406 set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
407 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
408 }
409 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
410 }
411
412 static void camerarx_phy_disable(struct cal_ctx *ctx)
413 {
414 u32 val;
415
416 if (!ctx->dev->cm->base) {
417 ctx_err(ctx, "cm not mapped\n");
418 return;
419 }
420
421 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
422 if (ctx->csi2_port == 1)
423 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
424 else if (ctx->csi2_port == 2)
425 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
426 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
427 }
428
429
430
431
432 static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
433 {
434 struct platform_device *pdev = dev->pdev;
435 struct cc_data *cc;
436
437 cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
438 if (!cc)
439 return ERR_PTR(-ENOMEM);
440
441 cc->res = platform_get_resource_byname(pdev,
442 IORESOURCE_MEM,
443 (core == 0) ?
444 "cal_rx_core0" :
445 "cal_rx_core1");
446 cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
447 if (IS_ERR(cc->base)) {
448 cal_err(dev, "failed to ioremap\n");
449 return ERR_CAST(cc->base);
450 }
451
452 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
453 cc->res->name, &cc->res->start, &cc->res->end);
454
455 return cc;
456 }
457
458
459
460
461 static void cal_get_hwinfo(struct cal_dev *dev)
462 {
463 u32 revision = 0;
464 u32 hwinfo = 0;
465
466 revision = reg_read(dev, CAL_HL_REVISION);
467 cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
468 revision);
469
470 hwinfo = reg_read(dev, CAL_HL_HWINFO);
471 cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
472 hwinfo);
473 }
474
475 static inline int cal_runtime_get(struct cal_dev *dev)
476 {
477 return pm_runtime_get_sync(&dev->pdev->dev);
478 }
479
480 static inline void cal_runtime_put(struct cal_dev *dev)
481 {
482 pm_runtime_put_sync(&dev->pdev->dev);
483 }
484
485 static void cal_quickdump_regs(struct cal_dev *dev)
486 {
487 cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
488 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
489 (__force const void *)dev->base,
490 resource_size(dev->res), false);
491
492 if (dev->ctx[0]) {
493 cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
494 &dev->ctx[0]->cc->res->start);
495 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
496 (__force const void *)dev->ctx[0]->cc->base,
497 resource_size(dev->ctx[0]->cc->res),
498 false);
499 }
500
501 if (dev->ctx[1]) {
502 cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
503 &dev->ctx[1]->cc->res->start);
504 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
505 (__force const void *)dev->ctx[1]->cc->base,
506 resource_size(dev->ctx[1]->cc->res),
507 false);
508 }
509
510 cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
511 &dev->cm->res->start);
512 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
513 (__force const void *)dev->cm->base,
514 resource_size(dev->cm->res), false);
515 }
516
517
518
519
520 static void enable_irqs(struct cal_ctx *ctx)
521 {
522
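/*
 * Enable this port's bit on HL IRQ lines 2 and 3 (consumed in cal_irq() for
 * frame-completion and DMA-start handling) plus the upper byte of the CSI-2
 * VC interrupt enables.
 */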
523 reg_write_field(ctx->dev,
524 CAL_HL_IRQENABLE_SET(2),
525 CAL_HL_IRQ_ENABLE,
526 CAL_HL_IRQ_MASK(ctx->csi2_port));
527
528 reg_write_field(ctx->dev,
529 CAL_HL_IRQENABLE_SET(3),
530 CAL_HL_IRQ_ENABLE,
531 CAL_HL_IRQ_MASK(ctx->csi2_port));
532
533 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
534 }
535
536 static void disable_irqs(struct cal_ctx *ctx)
537 {
538 u32 val;
539
540
541 val = 0;
542 set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
543 reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
544
545 val = 0;
546 set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
547 reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
548
549 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
550 }
551
552 static void csi2_init(struct cal_ctx *ctx)
553 {
554 int i;
555 u32 val;
556
557 val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
558 set_field(&val, CAL_GEN_ENABLE,
559 CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
560 set_field(&val, CAL_GEN_ENABLE,
561 CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
562 set_field(&val, CAL_GEN_DISABLE,
563 CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
564 set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
565 reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
566 ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
567 reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
568
569 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
570 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
571 CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
572 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
573 CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
574 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
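/* Poll up to ~10 ms for the complexio power status to report ON. */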
575 for (i = 0; i < 10; i++) {
576 if (reg_read_field(ctx->dev,
577 CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
578 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
579 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
580 break;
581 usleep_range(1000, 1100);
582 }
583 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
584 reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
585
586 val = reg_read(ctx->dev, CAL_CTRL);
587 set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
588 set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
589 set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
590 CAL_CTRL_POSTED_WRITES_MASK);
591 set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
592 set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
593 reg_write(ctx->dev, CAL_CTRL, val);
594 ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
595 }
596
597 static void csi2_lane_config(struct cal_ctx *ctx)
598 {
599 u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
600 u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
601 u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
602 struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
603 &ctx->endpoint.bus.mipi_csi2;
604 int lane;
605
606 set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
607 set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
608 for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
609
610
611
612
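/*
 * The clock lane uses the lowest position/polarity nibble; each data lane's
 * fields sit one nibble higher, so shift both masks by 4 per lane.
 */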
613 lane_mask <<= 4;
614 polarity_mask <<= 4;
615 set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
616 set_field(&val, mipi_csi2->lane_polarities[lane + 1],
617 polarity_mask);
618 }
619
620 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
621 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
622 ctx->csi2_port, val);
623 }
624
625 static void csi2_ppi_enable(struct cal_ctx *ctx)
626 {
627 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
628 CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
629 }
630
631 static void csi2_ppi_disable(struct cal_ctx *ctx)
632 {
633 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
634 CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
635 }
636
637 static void csi2_ctx_config(struct cal_ctx *ctx)
638 {
639 u32 val;
640
641 val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
642 set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
643
644
645
646
647
648
649
650
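/*
 * DT = 0x1: assumed (per the CAL TRM) to accept any incoming CSI-2 data type
 * on this context; the virtual channel comes from the endpoint's reg property
 * parsed in of_cal_create_instance().
 */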
651 set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
652
653 set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
654
655 set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
656 set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
657 set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
658 CAL_CSI2_CTX_PACK_MODE_MASK);
659 reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
660 ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
661 reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
662 }
663
664 static void pix_proc_config(struct cal_ctx *ctx)
665 {
666 u32 val;
667
668 val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
669 set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
670 set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
671 set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
672 set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
673 set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
674 set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
675 reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
676 ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
677 reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
678 }
679
680 static void cal_wr_dma_config(struct cal_ctx *ctx,
681 unsigned int width)
682 {
683 u32 val;
684
685 val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
686 set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
687 set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
688 CAL_WR_DMA_CTRL_DTAG_MASK);
689 set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
690 CAL_WR_DMA_CTRL_MODE_MASK);
691 set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
692 CAL_WR_DMA_CTRL_PATTERN_MASK);
693 set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
694 reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
695 ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
696 reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
697
698
699
700
701
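/*
 * "width" is the line length in bytes (bytesperline is passed in by
 * cal_start_streaming()). The divisions below suggest the offset register
 * takes 16-byte units and XSIZE takes 64-bit (8-byte) words, but that is an
 * inference, not a documented fact here.
 */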
702 reg_write_field(ctx->dev,
703 CAL_WR_DMA_OFST(ctx->csi2_port),
704 (width / 16),
705 CAL_WR_DMA_OFST_MASK);
706 ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
707 reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
708
709 val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
710
711 set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
712
713
714
715
716
717 set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
718 reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
719 ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
720 reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
721 }
722
723 static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
724 {
725 reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
726 }
727
728
729
730
731 #define TCLK_TERM 0
732 #define TCLK_MISS 1
733 #define TCLK_SETTLE 14
734 #define THS_SETTLE 15
735
736 static void csi2_phy_config(struct cal_ctx *ctx)
737 {
738 unsigned int reg0, reg1;
739 unsigned int ths_term, ths_settle;
740 unsigned int ddrclkperiod_us;
741
742
743
744
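/*
 * Derive the D-PHY timing from the sensor pixel rate. Despite the _us name,
 * ddrclkperiod_us ends up holding the DDR clock period in picoseconds, and
 * ths_term is the 20 ns window expressed in DDR clock periods (minus 2 when
 * large enough). Worked example with the default external_rate of 192000000
 * (set in cal_complete_ctx()):
 * 192000000 / 2000000 = 96, 1000000 / 96 = 10416, 20000 / 10416 = 1.
 */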
745 ddrclkperiod_us = ctx->external_rate / 2000000;
746 ddrclkperiod_us = 1000000 / ddrclkperiod_us;
747 ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
748
749 ths_term = 20000 / ddrclkperiod_us;
750 ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
751 ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768 ths_settle = THS_SETTLE;
769 ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
770
771 reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
772 set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
773 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
774 set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
775 set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
776
777 ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
778 reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
779
780 reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
781 set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
782 set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
783 set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
784 set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
785
786 ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
787 reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
788 }
789
790 static int cal_get_external_info(struct cal_ctx *ctx)
791 {
792 struct v4l2_ctrl *ctrl;
793
794 if (!ctx->sensor)
795 return -ENODEV;
796
797 ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
798 if (!ctrl) {
799 ctx_err(ctx, "no pixel rate control in subdev: %s\n",
800 ctx->sensor->name);
801 return -EPIPE;
802 }
803
804 ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
805 ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
806
807 return 0;
808 }
809
810 static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
811 {
812 struct cal_dmaqueue *dma_q = &ctx->vidq;
813 struct cal_buffer *buf;
814 unsigned long addr;
815
816 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
817 ctx->next_frm = buf;
818 list_del(&buf->list);
819
820 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
821 cal_wr_dma_addr(ctx, addr);
822 }
823
824 static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
825 {
826 ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
827 ctx->cur_frm->vb.field = ctx->m_fmt.field;
828 ctx->cur_frm->vb.sequence = ctx->sequence++;
829
830 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
831 ctx->cur_frm = ctx->next_frm;
832 }
833
834 #define isvcirqset(irq, vc, ff) (irq & \
835 (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
836
837 #define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
838
839 static irqreturn_t cal_irq(int irq_cal, void *data)
840 {
841 struct cal_dev *dev = (struct cal_dev *)data;
842 struct cal_ctx *ctx;
843 struct cal_dmaqueue *dma_q;
844 u32 irqst2, irqst3;
845
846
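/* IRQ line 2: a write DMA completed; return the finished buffer. */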
847 irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
848 if (irqst2) {
849
850 reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
851
852
853 if (isportirqset(irqst2, 1)) {
854 ctx = dev->ctx[0];
855
856 if (ctx->cur_frm != ctx->next_frm)
857 cal_process_buffer_complete(ctx);
858 }
859
860 if (isportirqset(irqst2, 2)) {
861 ctx = dev->ctx[1];
862
863 if (ctx->cur_frm != ctx->next_frm)
864 cal_process_buffer_complete(ctx);
865 }
866 }
867
868
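/*
 * IRQ line 3: a write DMA started; if another buffer is queued, program it as
 * the next DMA target.
 */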
869 irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
870 if (irqst3) {
871
872 reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
873
874
875 if (isportirqset(irqst3, 1)) {
876 ctx = dev->ctx[0];
877 dma_q = &ctx->vidq;
878
879 spin_lock(&ctx->slock);
880 if (!list_empty(&dma_q->active) &&
881 ctx->cur_frm == ctx->next_frm)
882 cal_schedule_next_buffer(ctx);
883 spin_unlock(&ctx->slock);
884 }
885
886 if (isportirqset(irqst3, 2)) {
887 ctx = dev->ctx[1];
888 dma_q = &ctx->vidq;
889
890 spin_lock(&ctx->slock);
891 if (!list_empty(&dma_q->active) &&
892 ctx->cur_frm == ctx->next_frm)
893 cal_schedule_next_buffer(ctx);
894 spin_unlock(&ctx->slock);
895 }
896 }
897
898 return IRQ_HANDLED;
899 }
900
901
902
903
904 static int cal_querycap(struct file *file, void *priv,
905 struct v4l2_capability *cap)
906 {
907 struct cal_ctx *ctx = video_drvdata(file);
908
909 strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
910 strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
911
912 snprintf(cap->bus_info, sizeof(cap->bus_info),
913 "platform:%s", ctx->v4l2_dev.name);
914 return 0;
915 }
916
917 static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
918 struct v4l2_fmtdesc *f)
919 {
920 struct cal_ctx *ctx = video_drvdata(file);
921 const struct cal_fmt *fmt = NULL;
922
923 if (f->index >= ctx->num_active_fmt)
924 return -EINVAL;
925
926 fmt = ctx->active_fmt[f->index];
927
928 f->pixelformat = fmt->fourcc;
929 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
930 return 0;
931 }
932
933 static int __subdev_get_format(struct cal_ctx *ctx,
934 struct v4l2_mbus_framefmt *fmt)
935 {
936 struct v4l2_subdev_format sd_fmt;
937 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
938 int ret;
939
940 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
941 sd_fmt.pad = 0;
942
943 ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
944 if (ret)
945 return ret;
946
947 *fmt = *mbus_fmt;
948
949 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
950 fmt->width, fmt->height, fmt->code);
951
952 return 0;
953 }
954
955 static int __subdev_set_format(struct cal_ctx *ctx,
956 struct v4l2_mbus_framefmt *fmt)
957 {
958 struct v4l2_subdev_format sd_fmt;
959 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
960 int ret;
961
962 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
963 sd_fmt.pad = 0;
964 *mbus_fmt = *fmt;
965
966 ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
967 if (ret)
968 return ret;
969
970 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
971 fmt->width, fmt->height, fmt->code);
972
973 return 0;
974 }
975
976 static int cal_calc_format_size(struct cal_ctx *ctx,
977 const struct cal_fmt *fmt,
978 struct v4l2_format *f)
979 {
980 if (!fmt) {
981 ctx_dbg(3, ctx, "No cal_fmt provided!\n");
982 return -EINVAL;
983 }
984
985 v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
986 &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
987 f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
988 fmt->depth >> 3);
989 f->fmt.pix.sizeimage = f->fmt.pix.height *
990 f->fmt.pix.bytesperline;
991
992 ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
993 __func__, fourcc_to_str(f->fmt.pix.pixelformat),
994 f->fmt.pix.width, f->fmt.pix.height,
995 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
996
997 return 0;
998 }
999
1000 static int cal_g_fmt_vid_cap(struct file *file, void *priv,
1001 struct v4l2_format *f)
1002 {
1003 struct cal_ctx *ctx = video_drvdata(file);
1004
1005 *f = ctx->v_fmt;
1006
1007 return 0;
1008 }
1009
1010 static int cal_try_fmt_vid_cap(struct file *file, void *priv,
1011 struct v4l2_format *f)
1012 {
1013 struct cal_ctx *ctx = video_drvdata(file);
1014 const struct cal_fmt *fmt;
1015 struct v4l2_subdev_frame_size_enum fse;
1016 int ret, found;
1017
1018 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1019 if (!fmt) {
1020 ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
1021 f->fmt.pix.pixelformat);
1022
1023
1024 fmt = ctx->active_fmt[0];
1025 f->fmt.pix.pixelformat = fmt->fourcc;
1026 }
1027
1028 f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
1029
1030
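/*
 * Check the sensor's advertised frame sizes: accept the requested size if it
 * matches a discrete size or falls within a reported range, otherwise fall
 * back to the currently configured size.
 */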
1031 ret = 0;
1032 found = false;
1033 fse.pad = 0;
1034 fse.code = fmt->code;
1035 fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1036 for (fse.index = 0; ; fse.index++) {
1037 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
1038 NULL, &fse);
1039 if (ret)
1040 break;
1041
1042 if ((f->fmt.pix.width == fse.max_width) &&
1043 (f->fmt.pix.height == fse.max_height)) {
1044 found = true;
1045 break;
1046 } else if ((f->fmt.pix.width >= fse.min_width) &&
1047 (f->fmt.pix.width <= fse.max_width) &&
1048 (f->fmt.pix.height >= fse.min_height) &&
1049 (f->fmt.pix.height <= fse.max_height)) {
1050 found = true;
1051 break;
1052 }
1053 }
1054
1055 if (!found) {
1056
1057 f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
1058 f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
1059 }
1060
1061
1062
1063
1064
1065 f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
1066 return cal_calc_format_size(ctx, fmt, f);
1067 }
1068
1069 static int cal_s_fmt_vid_cap(struct file *file, void *priv,
1070 struct v4l2_format *f)
1071 {
1072 struct cal_ctx *ctx = video_drvdata(file);
1073 struct vb2_queue *q = &ctx->vb_vidq;
1074 const struct cal_fmt *fmt;
1075 struct v4l2_mbus_framefmt mbus_fmt;
1076 int ret;
1077
1078 if (vb2_is_busy(q)) {
1079 ctx_dbg(3, ctx, "%s device busy\n", __func__);
1080 return -EBUSY;
1081 }
1082
1083 ret = cal_try_fmt_vid_cap(file, priv, f);
1084 if (ret < 0)
1085 return ret;
1086
1087 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1088
1089 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
1090
1091 ret = __subdev_set_format(ctx, &mbus_fmt);
1092 if (ret)
1093 return ret;
1094
1095
1096 if (mbus_fmt.code != fmt->code) {
1097 ctx_dbg(3, ctx,
1098 "%s subdev changed format on us, this should not happen\n",
1099 __func__);
1100 return -EINVAL;
1101 }
1102
1103 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1104 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1105 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1106 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1107 ctx->fmt = fmt;
1108 ctx->m_fmt = mbus_fmt;
1109 *f = ctx->v_fmt;
1110
1111 return 0;
1112 }
1113
1114 static int cal_enum_framesizes(struct file *file, void *fh,
1115 struct v4l2_frmsizeenum *fsize)
1116 {
1117 struct cal_ctx *ctx = video_drvdata(file);
1118 const struct cal_fmt *fmt;
1119 struct v4l2_subdev_frame_size_enum fse;
1120 int ret;
1121
1122
1123 fmt = find_format_by_pix(ctx, fsize->pixel_format);
1124 if (!fmt) {
1125 ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
1126 fsize->pixel_format);
1127 return -EINVAL;
1128 }
1129
1130 fse.index = fsize->index;
1131 fse.pad = 0;
1132 fse.code = fmt->code;
1133
1134 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
1135 if (ret)
1136 return ret;
1137
1138 ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1139 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1140 fse.min_height, fse.max_height);
1141
1142 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1143 fsize->discrete.width = fse.max_width;
1144 fsize->discrete.height = fse.max_height;
1145
1146 return 0;
1147 }
1148
1149 static int cal_enum_input(struct file *file, void *priv,
1150 struct v4l2_input *inp)
1151 {
1152 if (inp->index >= CAL_NUM_INPUT)
1153 return -EINVAL;
1154
1155 inp->type = V4L2_INPUT_TYPE_CAMERA;
1156 sprintf(inp->name, "Camera %u", inp->index);
1157 return 0;
1158 }
1159
1160 static int cal_g_input(struct file *file, void *priv, unsigned int *i)
1161 {
1162 struct cal_ctx *ctx = video_drvdata(file);
1163
1164 *i = ctx->input;
1165 return 0;
1166 }
1167
1168 static int cal_s_input(struct file *file, void *priv, unsigned int i)
1169 {
1170 struct cal_ctx *ctx = video_drvdata(file);
1171
1172 if (i >= CAL_NUM_INPUT)
1173 return -EINVAL;
1174
1175 ctx->input = i;
1176 return 0;
1177 }
1178
1179
1180 static int cal_enum_frameintervals(struct file *file, void *priv,
1181 struct v4l2_frmivalenum *fival)
1182 {
1183 struct cal_ctx *ctx = video_drvdata(file);
1184 const struct cal_fmt *fmt;
1185 struct v4l2_subdev_frame_interval_enum fie = {
1186 .index = fival->index,
1187 .width = fival->width,
1188 .height = fival->height,
1189 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1190 };
1191 int ret;
1192
1193 fmt = find_format_by_pix(ctx, fival->pixel_format);
1194 if (!fmt)
1195 return -EINVAL;
1196
1197 fie.code = fmt->code;
1198 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
1199 NULL, &fie);
1200 if (ret)
1201 return ret;
1202 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1203 fival->discrete = fie.interval;
1204
1205 return 0;
1206 }
1207
1208
1209
1210
1211 static int cal_queue_setup(struct vb2_queue *vq,
1212 unsigned int *nbuffers, unsigned int *nplanes,
1213 unsigned int sizes[], struct device *alloc_devs[])
1214 {
1215 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1216 unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
1217
1218 if (vq->num_buffers + *nbuffers < 3)
1219 *nbuffers = 3 - vq->num_buffers;
1220
1221 if (*nplanes) {
1222 if (sizes[0] < size)
1223 return -EINVAL;
1224 size = sizes[0];
1225 }
1226
1227 *nplanes = 1;
1228 sizes[0] = size;
1229
1230 ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
1231
1232 return 0;
1233 }
1234
1235 static int cal_buffer_prepare(struct vb2_buffer *vb)
1236 {
1237 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1238 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1239 vb.vb2_buf);
1240 unsigned long size;
1241
1242 if (WARN_ON(!ctx->fmt))
1243 return -EINVAL;
1244
1245 size = ctx->v_fmt.fmt.pix.sizeimage;
1246 if (vb2_plane_size(vb, 0) < size) {
1247 ctx_err(ctx,
1248 "data will not fit into plane (%lu < %lu)\n",
1249 vb2_plane_size(vb, 0), size);
1250 return -EINVAL;
1251 }
1252
1253 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1254 return 0;
1255 }
1256
1257 static void cal_buffer_queue(struct vb2_buffer *vb)
1258 {
1259 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1260 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1261 vb.vb2_buf);
1262 struct cal_dmaqueue *vidq = &ctx->vidq;
1263 unsigned long flags = 0;
1264
1265
1266 spin_lock_irqsave(&ctx->slock, flags);
1267 list_add_tail(&buf->list, &vidq->active);
1268 spin_unlock_irqrestore(&ctx->slock, flags);
1269 }
1270
1271 static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1272 {
1273 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1274 struct cal_dmaqueue *dma_q = &ctx->vidq;
1275 struct cal_buffer *buf, *tmp;
1276 unsigned long addr = 0;
1277 unsigned long flags;
1278 int ret;
1279
1280 spin_lock_irqsave(&ctx->slock, flags);
1281 if (list_empty(&dma_q->active)) {
1282 spin_unlock_irqrestore(&ctx->slock, flags);
1283 ctx_dbg(3, ctx, "buffer queue is empty\n");
1284 return -EIO;
1285 }
1286
1287 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
1288 ctx->cur_frm = buf;
1289 ctx->next_frm = buf;
1290 list_del(&buf->list);
1291 spin_unlock_irqrestore(&ctx->slock, flags);
1292
1293 addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
1294 ctx->sequence = 0;
1295
1296 ret = cal_get_external_info(ctx);
1297 if (ret < 0)
1298 goto err;
1299
1300 cal_runtime_get(ctx->dev);
1301
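/*
 * Bring up the capture path in order: IRQs, CAMERARX PHY, CSI-2
 * timing/PHY/lane/context setup, pixel processing, write DMA, and finally the
 * PPI interface before starting the sensor stream.
 */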
1302 enable_irqs(ctx);
1303 camerarx_phy_enable(ctx);
1304 csi2_init(ctx);
1305 csi2_phy_config(ctx);
1306 csi2_lane_config(ctx);
1307 csi2_ctx_config(ctx);
1308 pix_proc_config(ctx);
1309 cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1310 cal_wr_dma_addr(ctx, addr);
1311 csi2_ppi_enable(ctx);
1312
1313 ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
1314 if (ret) {
1315 ctx_err(ctx, "stream on failed in subdev\n");
1316 cal_runtime_put(ctx->dev);
1317 goto err;
1318 }
1319
1320 if (debug >= 4)
1321 cal_quickdump_regs(ctx->dev);
1322
1323 return 0;
1324
1325 err:
1326 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1327 list_del(&buf->list);
1328 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1329 }
1330 return ret;
1331 }
1332
1333 static void cal_stop_streaming(struct vb2_queue *vq)
1334 {
1335 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1336 struct cal_dmaqueue *dma_q = &ctx->vidq;
1337 struct cal_buffer *buf, *tmp;
1338 unsigned long flags;
1339
1340 if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
1341 ctx_err(ctx, "stream off failed in subdev\n");
1342
1343 csi2_ppi_disable(ctx);
1344 disable_irqs(ctx);
1345
1346
1347 spin_lock_irqsave(&ctx->slock, flags);
1348 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1349 list_del(&buf->list);
1350 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1351 }
1352
1353 if (ctx->cur_frm == ctx->next_frm) {
1354 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1355 } else {
1356 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1357 vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
1358 VB2_BUF_STATE_ERROR);
1359 }
1360 ctx->cur_frm = NULL;
1361 ctx->next_frm = NULL;
1362 spin_unlock_irqrestore(&ctx->slock, flags);
1363
1364 cal_runtime_put(ctx->dev);
1365 }
1366
1367 static const struct vb2_ops cal_video_qops = {
1368 .queue_setup = cal_queue_setup,
1369 .buf_prepare = cal_buffer_prepare,
1370 .buf_queue = cal_buffer_queue,
1371 .start_streaming = cal_start_streaming,
1372 .stop_streaming = cal_stop_streaming,
1373 .wait_prepare = vb2_ops_wait_prepare,
1374 .wait_finish = vb2_ops_wait_finish,
1375 };
1376
1377 static const struct v4l2_file_operations cal_fops = {
1378 .owner = THIS_MODULE,
1379 .open = v4l2_fh_open,
1380 .release = vb2_fop_release,
1381 .read = vb2_fop_read,
1382 .poll = vb2_fop_poll,
1383 .unlocked_ioctl = video_ioctl2,
1384 .mmap = vb2_fop_mmap,
1385 };
1386
1387 static const struct v4l2_ioctl_ops cal_ioctl_ops = {
1388 .vidioc_querycap = cal_querycap,
1389 .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
1390 .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
1391 .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
1392 .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
1393 .vidioc_enum_framesizes = cal_enum_framesizes,
1394 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1395 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1396 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1397 .vidioc_querybuf = vb2_ioctl_querybuf,
1398 .vidioc_qbuf = vb2_ioctl_qbuf,
1399 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1400 .vidioc_enum_input = cal_enum_input,
1401 .vidioc_g_input = cal_g_input,
1402 .vidioc_s_input = cal_s_input,
1403 .vidioc_enum_frameintervals = cal_enum_frameintervals,
1404 .vidioc_streamon = vb2_ioctl_streamon,
1405 .vidioc_streamoff = vb2_ioctl_streamoff,
1406 .vidioc_log_status = v4l2_ctrl_log_status,
1407 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1408 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1409 };
1410
1411 static const struct video_device cal_videodev = {
1412 .name = CAL_MODULE_NAME,
1413 .fops = &cal_fops,
1414 .ioctl_ops = &cal_ioctl_ops,
1415 .minor = -1,
1416 .release = video_device_release_empty,
1417 .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
1418 V4L2_CAP_READWRITE,
1419 };
1420
1421
1422
1423
1424
1425 static int cal_complete_ctx(struct cal_ctx *ctx);
1426
1427 static int cal_async_bound(struct v4l2_async_notifier *notifier,
1428 struct v4l2_subdev *subdev,
1429 struct v4l2_async_subdev *asd)
1430 {
1431 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1432 struct v4l2_subdev_mbus_code_enum mbus_code;
1433 int ret = 0;
1434 int i, j, k;
1435
1436 if (ctx->sensor) {
1437 ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
1438 subdev->name);
1439 return 0;
1440 }
1441
1442 ctx->sensor = subdev;
1443 ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
1444
1445
1446 ctx->num_active_fmt = 0;
1447 for (j = 0, i = 0; ret != -EINVAL; ++j) {
1448 struct cal_fmt *fmt;
1449
1450 memset(&mbus_code, 0, sizeof(mbus_code));
1451 mbus_code.index = j;
1452 ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
1453 NULL, &mbus_code);
1454 if (ret)
1455 continue;
1456
1457 ctx_dbg(2, ctx,
1458 "subdev %s: code: %04x idx: %d\n",
1459 subdev->name, mbus_code.code, j);
1460
1461 for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
1462 fmt = &cal_formats[k];
1463
1464 if (mbus_code.code == fmt->code) {
1465 ctx->active_fmt[i] = fmt;
1466 ctx_dbg(2, ctx,
1467 "matched fourcc: %s: code: %04x idx: %d\n",
1468 fourcc_to_str(fmt->fourcc),
1469 fmt->code, i);
1470 ctx->num_active_fmt = ++i;
1471 }
1472 }
1473 }
1474
1475 if (i == 0) {
1476 ctx_err(ctx, "No suitable format reported by subdev %s\n",
1477 subdev->name);
1478 return -EINVAL;
1479 }
1480
1481 cal_complete_ctx(ctx);
1482
1483 return 0;
1484 }
1485
1486 static int cal_async_complete(struct v4l2_async_notifier *notifier)
1487 {
1488 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1489 const struct cal_fmt *fmt;
1490 struct v4l2_mbus_framefmt mbus_fmt;
1491 int ret;
1492
1493 ret = __subdev_get_format(ctx, &mbus_fmt);
1494 if (ret)
1495 return ret;
1496
1497 fmt = find_format_by_code(ctx, mbus_fmt.code);
1498 if (!fmt) {
1499 ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
1500 mbus_fmt.code);
1501 return -EINVAL;
1502 }
1503
1504
1505 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1506 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1507 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1508 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1509 ctx->fmt = fmt;
1510 ctx->m_fmt = mbus_fmt;
1511
1512 return 0;
1513 }
1514
1515 static const struct v4l2_async_notifier_operations cal_async_ops = {
1516 .bound = cal_async_bound,
1517 .complete = cal_async_complete,
1518 };
1519
1520 static int cal_complete_ctx(struct cal_ctx *ctx)
1521 {
1522 struct video_device *vfd;
1523 struct vb2_queue *q;
1524 int ret;
1525
1526 ctx->timeperframe = tpf_default;
1527 ctx->external_rate = 192000000;
1528
1529
1530 spin_lock_init(&ctx->slock);
1531 mutex_init(&ctx->mutex);
1532
1533
1534 q = &ctx->vb_vidq;
1535 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1536 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
1537 q->drv_priv = ctx;
1538 q->buf_struct_size = sizeof(struct cal_buffer);
1539 q->ops = &cal_video_qops;
1540 q->mem_ops = &vb2_dma_contig_memops;
1541 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1542 q->lock = &ctx->mutex;
1543 q->min_buffers_needed = 3;
1544 q->dev = ctx->v4l2_dev.dev;
1545
1546 ret = vb2_queue_init(q);
1547 if (ret)
1548 return ret;
1549
1550
1551 INIT_LIST_HEAD(&ctx->vidq.active);
1552
1553 vfd = &ctx->vdev;
1554 *vfd = cal_videodev;
1555 vfd->v4l2_dev = &ctx->v4l2_dev;
1556 vfd->queue = q;
1557
1558
1559
1560
1561
1562 vfd->lock = &ctx->mutex;
1563 video_set_drvdata(vfd, ctx);
1564
1565 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1566 if (ret < 0)
1567 return ret;
1568
1569 v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
1570 video_device_node_name(vfd));
1571
1572 return 0;
1573 }
1574
1575 static struct device_node *
1576 of_get_next_port(const struct device_node *parent,
1577 struct device_node *prev)
1578 {
1579 struct device_node *port = NULL;
1580
1581 if (!parent)
1582 return NULL;
1583
1584 if (!prev) {
1585 struct device_node *ports;
1586
1587
1588
1589
1590 ports = of_get_child_by_name(parent, "ports");
1591 if (ports)
1592 parent = ports;
1593
1594 port = of_get_child_by_name(parent, "port");
1595
1596
1597 of_node_put(ports);
1598 } else {
1599 struct device_node *ports;
1600
1601 ports = of_get_parent(prev);
1602 if (!ports)
1603 return NULL;
1604
1605 do {
1606 port = of_get_next_child(ports, prev);
1607 if (!port) {
1608 of_node_put(ports);
1609 return NULL;
1610 }
1611 prev = port;
1612 } while (!of_node_name_eq(port, "port"));
1613 of_node_put(ports);
1614 }
1615
1616 return port;
1617 }
1618
1619 static struct device_node *
1620 of_get_next_endpoint(const struct device_node *parent,
1621 struct device_node *prev)
1622 {
1623 struct device_node *ep = NULL;
1624
1625 if (!parent)
1626 return NULL;
1627
1628 do {
1629 ep = of_get_next_child(parent, prev);
1630 if (!ep)
1631 return NULL;
1632 prev = ep;
1633 } while (!of_node_name_eq(ep, "endpoint"));
1634
1635 return ep;
1636 }
1637
1638 static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
1639 {
1640 struct platform_device *pdev = ctx->dev->pdev;
1641 struct device_node *ep_node, *port, *sensor_node, *parent;
1642 struct v4l2_fwnode_endpoint *endpoint;
1643 struct v4l2_async_subdev *asd;
1644 u32 regval = 0;
1645 int ret, index, found_port = 0, lane;
1646
1647 parent = pdev->dev.of_node;
1648
1649 endpoint = &ctx->endpoint;
1650
1651 ep_node = NULL;
1652 port = NULL;
1653 sensor_node = NULL;
1654 ret = -EINVAL;
1655
1656 ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
1657 for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
1658 port = of_get_next_port(parent, port);
1659 if (!port) {
1660 ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
1661 index);
1662 goto cleanup_exit;
1663 }
1664
1665
1666 of_property_read_u32(port, "reg", &regval);
1667 ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
1668 index, inst, regval);
1669 if ((regval == inst) && (index == inst)) {
1670 found_port = 1;
1671 break;
1672 }
1673 }
1674
1675 if (!found_port) {
1676 ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
1677 inst);
1678 goto cleanup_exit;
1679 }
1680
1681 ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
1682 inst);
1683
1684 ep_node = of_get_next_endpoint(port, ep_node);
1685 if (!ep_node) {
1686 ctx_dbg(3, ctx, "can't get next endpoint\n");
1687 goto cleanup_exit;
1688 }
1689
1690 sensor_node = of_graph_get_remote_port_parent(ep_node);
1691 if (!sensor_node) {
1692 ctx_dbg(3, ctx, "can't get remote parent\n");
1693 goto cleanup_exit;
1694 }
1695
1696 v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
1697
1698 if (endpoint->bus_type != V4L2_MBUS_CSI2_DPHY) {
1699 ctx_err(ctx, "Port:%d sub-device %pOFn is not a CSI2 device\n",
1700 inst, sensor_node);
1701 goto cleanup_exit;
1702 }
1703
1704
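/* The endpoint's reg value (base.id) selects the CSI-2 virtual channel. */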
1705 ctx->virtual_channel = endpoint->base.id;
1706
1707 ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
1708 ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
1709 ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
1710 ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
1711 ctx_dbg(3, ctx, "num_data_lanes=%d\n",
1712 endpoint->bus.mipi_csi2.num_data_lanes);
1713 ctx_dbg(3, ctx, "data_lanes= <\n");
1714 for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
1715 ctx_dbg(3, ctx, "\t%d\n",
1716 endpoint->bus.mipi_csi2.data_lanes[lane]);
1717 ctx_dbg(3, ctx, "\t>\n");
1718
1719 ctx_dbg(1, ctx, "Port: %d found sub-device %pOFn\n",
1720 inst, sensor_node);
1721
1722 v4l2_async_notifier_init(&ctx->notifier);
1723
1724 asd = kzalloc(sizeof(*asd), GFP_KERNEL);
1725 if (!asd)
1726 goto cleanup_exit;
1727
1728 asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
1729 asd->match.fwnode = of_fwnode_handle(sensor_node);
1730
1731 ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd);
1732 if (ret) {
1733 ctx_err(ctx, "Error adding asd\n");
1734 kfree(asd);
1735 goto cleanup_exit;
1736 }
1737
1738 ctx->notifier.ops = &cal_async_ops;
1739 ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
1740 &ctx->notifier);
1741 if (ret) {
1742 ctx_err(ctx, "Error registering async notifier\n");
1743 v4l2_async_notifier_cleanup(&ctx->notifier);
1744 ret = -EINVAL;
1745 }
1746
1747
1748
1749
1750
1751
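/*
 * Do not put sensor_node here: on success its fwnode reference is now held
 * via the asd registered with the notifier (and on failure the cleanup above
 * has already released it).
 */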
1752 sensor_node = NULL;
1753
1754 cleanup_exit:
1755 of_node_put(sensor_node);
1756 of_node_put(ep_node);
1757 of_node_put(port);
1758
1759 return ret;
1760 }
1761
1762 static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
1763 {
1764 struct cal_ctx *ctx;
1765 struct v4l2_ctrl_handler *hdl;
1766 int ret;
1767
1768 ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1769 if (!ctx)
1770 return NULL;
1771
1772
1773 ctx->dev = dev;
1774
1775 snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
1776 "%s-%03d", CAL_MODULE_NAME, inst);
1777 ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
1778 if (ret)
1779 goto err_exit;
1780
1781 hdl = &ctx->ctrl_handler;
1782 ret = v4l2_ctrl_handler_init(hdl, 11);
1783 if (ret) {
1784 ctx_err(ctx, "Failed to init ctrl handler\n");
1785 goto unreg_dev;
1786 }
1787 ctx->v4l2_dev.ctrl_handler = hdl;
1788
1789
1790 ctx->cc = dev->cc[inst];
1791
1792
1793 ctx->csi2_port = inst + 1;
1794
1795 ret = of_cal_create_instance(ctx, inst);
1796 if (ret) {
1797 ret = -EINVAL;
1798 goto free_hdl;
1799 }
1800 return ctx;
1801
1802 free_hdl:
1803 v4l2_ctrl_handler_free(hdl);
1804 unreg_dev:
1805 v4l2_device_unregister(&ctx->v4l2_dev);
1806 err_exit:
1807 return NULL;
1808 }
1809
1810 static int cal_probe(struct platform_device *pdev)
1811 {
1812 struct cal_dev *dev;
1813 struct cal_ctx *ctx;
1814 int ret;
1815 int irq;
1816 int i;
1817
1818 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1819 if (!dev)
1820 return -ENOMEM;
1821
1822
1823 strscpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
1824 sizeof(dev->v4l2_dev.name));
1825
1826
1827 dev->pdev = pdev;
1828
1829 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1830 "cal_top");
1831 dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
1832 if (IS_ERR(dev->base))
1833 return PTR_ERR(dev->base);
1834
1835 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
1836 dev->res->name, &dev->res->start, &dev->res->end);
1837
1838 irq = platform_get_irq(pdev, 0);
1839 cal_dbg(1, dev, "got irq# %d\n", irq);
1840 ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1841 dev);
1842 if (ret)
1843 return ret;
1844
1845 platform_set_drvdata(pdev, dev);
1846
1847 dev->cm = cm_create(dev);
1848 if (IS_ERR(dev->cm))
1849 return PTR_ERR(dev->cm);
1850
1851 dev->cc[0] = cc_create(dev, 0);
1852 if (IS_ERR(dev->cc[0]))
1853 return PTR_ERR(dev->cc[0]);
1854
1855 dev->cc[1] = cc_create(dev, 1);
1856 if (IS_ERR(dev->cc[1]))
1857 return PTR_ERR(dev->cc[1]);
1858
1859 dev->ctx[0] = NULL;
1860 dev->ctx[1] = NULL;
1861
1862 dev->ctx[0] = cal_create_instance(dev, 0);
1863 dev->ctx[1] = cal_create_instance(dev, 1);
1864 if (!dev->ctx[0] && !dev->ctx[1]) {
1865 cal_err(dev, "Neither port is configured, no point in staying up\n");
1866 return -ENODEV;
1867 }
1868
1869 pm_runtime_enable(&pdev->dev);
1870
1871 ret = cal_runtime_get(dev);
1872 if (ret)
1873 goto runtime_disable;
1874
1875
1876 cal_get_hwinfo(dev);
1877
1878 cal_runtime_put(dev);
1879
1880 return 0;
1881
1882 runtime_disable:
1883 pm_runtime_disable(&pdev->dev);
1884 for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1885 ctx = dev->ctx[i];
1886 if (ctx) {
1887 v4l2_async_notifier_unregister(&ctx->notifier);
1888 v4l2_async_notifier_cleanup(&ctx->notifier);
1889 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1890 v4l2_device_unregister(&ctx->v4l2_dev);
1891 }
1892 }
1893
1894 return ret;
1895 }
1896
1897 static int cal_remove(struct platform_device *pdev)
1898 {
1899 struct cal_dev *dev =
1900 (struct cal_dev *)platform_get_drvdata(pdev);
1901 struct cal_ctx *ctx;
1902 int i;
1903
1904 cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
1905
1906 cal_runtime_get(dev);
1907
1908 for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1909 ctx = dev->ctx[i];
1910 if (ctx) {
1911 ctx_dbg(1, ctx, "unregistering %s\n",
1912 video_device_node_name(&ctx->vdev));
1913 camerarx_phy_disable(ctx);
1914 v4l2_async_notifier_unregister(&ctx->notifier);
1915 v4l2_async_notifier_cleanup(&ctx->notifier);
1916 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1917 v4l2_device_unregister(&ctx->v4l2_dev);
1918 video_unregister_device(&ctx->vdev);
1919 }
1920 }
1921
1922 cal_runtime_put(dev);
1923 pm_runtime_disable(&pdev->dev);
1924
1925 return 0;
1926 }
1927
1928 #if defined(CONFIG_OF)
1929 static const struct of_device_id cal_of_match[] = {
1930 { .compatible = "ti,dra72-cal", },
1931 {},
1932 };
1933 MODULE_DEVICE_TABLE(of, cal_of_match);
1934 #endif
1935
1936 static struct platform_driver cal_pdrv = {
1937 .probe = cal_probe,
1938 .remove = cal_remove,
1939 .driver = {
1940 .name = CAL_MODULE_NAME,
1941 .of_match_table = of_match_ptr(cal_of_match),
1942 },
1943 };
1944
1945 module_platform_driver(cal_pdrv);