This source file includes the following definitions:
- cio2_find_format
- cio2_bytesperline
- cio2_fbpt_exit_dummy
- cio2_fbpt_init_dummy
- cio2_fbpt_entry_enable
- cio2_fbpt_entry_init_dummy
- cio2_fbpt_entry_init_buf
- cio2_fbpt_init
- cio2_fbpt_exit
- cio2_rx_timing
- cio2_csi2_calc_timing
- cio2_hw_init
- cio2_hw_exit
- cio2_buffer_done
- cio2_queue_event_sof
- cio2_irq_handle_once
- cio2_irq
- cio2_vb2_return_all_buffers
- cio2_vb2_queue_setup
- cio2_vb2_buf_init
- cio2_vb2_buf_queue
- cio2_vb2_buf_cleanup
- cio2_vb2_start_streaming
- cio2_vb2_stop_streaming
- cio2_v4l2_querycap
- cio2_v4l2_enum_fmt
- cio2_v4l2_g_fmt
- cio2_v4l2_try_fmt
- cio2_v4l2_s_fmt
- cio2_video_enum_input
- cio2_video_g_input
- cio2_video_s_input
- cio2_subdev_subscribe_event
- cio2_subdev_open
- cio2_subdev_get_fmt
- cio2_subdev_set_fmt
- cio2_subdev_enum_mbus_code
- cio2_subdev_link_validate_get_format
- cio2_video_link_validate
- cio2_notifier_bound
- cio2_notifier_unbind
- cio2_notifier_complete
- cio2_parse_firmware
- cio2_queue_init
- cio2_queue_exit
- cio2_queues_init
- cio2_queues_exit
- cio2_pci_config_setup
- cio2_pci_probe
- cio2_pci_remove
- cio2_runtime_suspend
- cio2_runtime_resume
- arrange
- cio2_fbpt_rearrange
- cio2_suspend
- cio2_resume
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Intel Corporation

15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/property.h>
21 #include <linux/vmalloc.h>
22 #include <media/v4l2-ctrls.h>
23 #include <media/v4l2-device.h>
24 #include <media/v4l2-event.h>
25 #include <media/v4l2-fwnode.h>
26 #include <media/v4l2-ioctl.h>
27 #include <media/videobuf2-dma-sg.h>
28
29 #include "ipu3-cio2.h"
30
31 struct ipu3_cio2_fmt {
32 u32 mbus_code;
33 u32 fourcc;
34 u8 mipicode;
35 };
36
/*
 * Raw Bayer formats of the third generation Intel Image Processing
 * Unit (IPU3): 10-bit raw pixels packed so that 64 bytes carry
 * 50 pixels (see cio2_bytesperline() below).
 */
43 static const struct ipu3_cio2_fmt formats[] = {
44 {
45 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
46 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
47 .mipicode = 0x2b,
48 }, {
49 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
50 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
51 .mipicode = 0x2b,
52 }, {
53 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
54 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
55 .mipicode = 0x2b,
56 }, {
57 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
58 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
59 .mipicode = 0x2b,
60 },
61 };
62
/*
 * cio2_find_format - look up a format by fourcc and/or media bus code
 * @pixelformat: fourcc to match, ignored if NULL
 * @mbus_code: media bus code to match, ignored if NULL
 */
68 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
69 const u32 *mbus_code)
70 {
71 unsigned int i;
72
73 for (i = 0; i < ARRAY_SIZE(formats); i++) {
74 if (pixelformat && *pixelformat != formats[i].fourcc)
75 continue;
76 if (mbus_code && *mbus_code != formats[i].mbus_code)
77 continue;
78
79 return &formats[i];
80 }
81
82 return NULL;
83 }
84
85 static inline u32 cio2_bytesperline(const unsigned int width)
86 {
/*
 * 64 bytes for every 50 pixels of 10-bit packed raw data; round up so
 * every line length is a multiple of 64 bytes.
 */
91 return DIV_ROUND_UP(width, 50) * 64;
92 }
93
/**** Frame buffer pointer table (FBPT) handling ****/

96 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
97 {
98 if (cio2->dummy_lop) {
99 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
100 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
101 cio2->dummy_lop = NULL;
102 }
103 if (cio2->dummy_page) {
104 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
105 cio2->dummy_page, cio2->dummy_page_bus_addr);
106 cio2->dummy_page = NULL;
107 }
108 }
109
110 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
111 {
112 unsigned int i;
113
114 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
115 CIO2_PAGE_SIZE,
116 &cio2->dummy_page_bus_addr,
117 GFP_KERNEL);
118 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
119 CIO2_PAGE_SIZE,
120 &cio2->dummy_lop_bus_addr,
121 GFP_KERNEL);
122 if (!cio2->dummy_page || !cio2->dummy_lop) {
123 cio2_fbpt_exit_dummy(cio2);
124 return -ENOMEM;
125 }
126
/*
 * A list of pages (LOP) is one page of 32-bit page pointers; point
 * every entry of the dummy LOP at the dummy page.
 */
130 for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
131 cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
132
133 return 0;
134 }
135
136 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
137 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
138 {
/*
 * The CPU first fills in the other FBPT entry fields and only then
 * sets the VALID bit; the barrier ensures the device cannot see VALID
 * before those fields are written.
 */
145 dma_wmb();
/*
 * Mark the first entry valid and request interrupts on both frame
 * start (IOS) and frame completion (IOC).
 */
151 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
152 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
153 }
154
/* Initialize FBPT entries to point to the dummy frame */
156 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
157 struct cio2_fbpt_entry
158 entry[CIO2_MAX_LOPS])
159 {
160 unsigned int i;
161
162 entry[0].first_entry.first_page_offset = 0;
163 entry[1].second_entry.num_of_pages =
164 CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
165 entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
166
167 for (i = 0; i < CIO2_MAX_LOPS; i++)
168 entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
169
170 cio2_fbpt_entry_enable(cio2, entry);
171 }
172
/* Initialize FBPT entries to point to a given buffer */
174 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
175 struct cio2_buffer *b,
176 struct cio2_fbpt_entry
177 entry[CIO2_MAX_LOPS])
178 {
179 struct vb2_buffer *vb = &b->vbb.vb2_buf;
180 unsigned int length = vb->planes[0].length;
181 int remaining, i;
182
183 entry[0].first_entry.first_page_offset = b->offset;
184 remaining = length + entry[0].first_entry.first_page_offset;
185 entry[1].second_entry.num_of_pages =
186 DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
/*
 * last_page_available_bytes is the index of the last used byte within
 * the final page of the buffer (bytes used in that page minus one).
 * If the buffer ends exactly on a page boundary, the whole page is
 * used and the value is CIO2_PAGE_SIZE - 1.
 */
195 entry[1].second_entry.last_page_available_bytes =
196 (remaining & ~PAGE_MASK) ?
197 (remaining & ~PAGE_MASK) - 1 :
198 CIO2_PAGE_SIZE - 1;
199
200 remaining = length;
201 i = 0;
202 while (remaining > 0) {
203 entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
204 remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
205 entry++;
206 i++;
207 }
208
/*
 * The first unused entry after the buffer must still point to a valid
 * LOP; use the dummy one.
 */
212 entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
213
214 cio2_fbpt_entry_enable(cio2, entry);
215 }
216
217 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
218 {
219 struct device *dev = &cio2->pci_dev->dev;
220
221 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
222 GFP_KERNEL);
223 if (!q->fbpt)
224 return -ENOMEM;
225
226 return 0;
227 }
228
229 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
230 {
231 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
232 }
233
/**** CSI-2 receiver D-PHY timing ****/

/*
 * The CSI-2 receiver has several delay counters whose values depend on
 * the MIPI bus frequency F in Hz (the sensor transmit rate):
 *
 *	register value = (A / 1e9 + B * UI) / COUNT_ACC
 *
 * where
 *	UI = 1 / (2 * F) seconds (one unit interval)
 *	COUNT_ACC = counter accuracy in seconds (0.0625 ns here,
 *	hence accinv = 16 below)
 *
 * A and B are per-register coefficients (TERMEN and SETTLE, clock and
 * data lanes) provided by the CIO2_CSIRX_DLY_CNT_*_A/_B macros;
 * cio2_rx_timing() evaluates this formula with 32-bit arithmetic.
 */
269 #define LIMIT_SHIFT 8
270
271 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
272 {
273 const u32 accinv = 16;
274 const u32 uiinv = 500000000;
275 s32 r;
276
277 freq >>= LIMIT_SHIFT;
278
279 if (WARN_ON(freq <= 0 || freq > S32_MAX))
280 return def;
281
/*
 * b is zero or a small negative value, so accinv * b *
 * (uiinv >> LIMIT_SHIFT) stays well within the s32 range.
 */
285 r = accinv * b * (uiinv >> LIMIT_SHIFT);
286 r = r / (s32)freq;
287
288 r += accinv * a;
289
290 return r;
291 };
292
/* Compute the CSI-2 receiver timings from the sensor link frequency */
294 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
295 struct cio2_csi2_timing *timing)
296 {
297 struct device *dev = &cio2->pci_dev->dev;
298 struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
299 struct v4l2_ctrl *link_freq;
300 s64 freq;
301 int r;
302
303 if (!q->sensor)
304 return -ENODEV;
305
306 link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
307 if (!link_freq) {
308 dev_err(dev, "failed to find LINK_FREQ\n");
309 return -EPIPE;
310 }
311
312 qm.index = v4l2_ctrl_g_ctrl(link_freq);
313 r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
314 if (r) {
315 dev_err(dev, "failed to get menu item\n");
316 return r;
317 }
318
319 if (!qm.value) {
dev_err(dev, "invalid link_freq\n");
321 return -EINVAL;
322 }
323 freq = qm.value;
324
325 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
326 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
327 freq,
328 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
329 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
330 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
331 freq,
332 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
333 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
334 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
335 freq,
336 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
337 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
338 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
339 freq,
340 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
341
342 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
343 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
344 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
345 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
346
347 return 0;
348 };
349
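/*
 * Program the CSI-2 receiver and CIO2 for one queue: D-PHY timings,
 * PBM watermarks and arbitration, MIPI backend LUTs, interrupt enables,
 * and finally the FBPT base address and DMA channel configuration.
 */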
350 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
351 {
352 static const int NUM_VCS = 4;
353 static const int SID;
354 static const int ENTRY;
355 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
356 CIO2_FBPT_SUBENTRY_UNIT);
357 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
358 const struct ipu3_cio2_fmt *fmt;
359 void __iomem *const base = cio2->base;
360 u8 lanes, csi2bus = q->csi2.port;
361 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
362 struct cio2_csi2_timing timing;
363 int i, r;
364
365 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
366 if (!fmt)
367 return -EINVAL;
368
369 lanes = q->csi2.lanes;
370
371 r = cio2_csi2_calc_timing(cio2, q, &timing);
372 if (r)
373 return r;
374
375 writel(timing.clk_termen, q->csi_rx_base +
376 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
377 writel(timing.clk_settle, q->csi_rx_base +
378 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
379
380 for (i = 0; i < lanes; i++) {
381 writel(timing.dat_termen, q->csi_rx_base +
382 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
383 writel(timing.dat_settle, q->csi_rx_base +
384 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
385 }
386
387 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
388 CIO2_PBM_WMCTRL1_MID1_2CK |
389 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
390 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
391 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
392 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
393 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
394 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
395 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
396 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
397 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
398 CIO2_PBM_ARB_CTRL_LE_EN |
399 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
400 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
401 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
402 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
403 base + CIO2_REG_PBM_ARB_CTRL);
404 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
405 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
406 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
407 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
408
409 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
410 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
411
/* MIPI backend: enable the short packet LUT entry for each virtual channel */
413 for (i = 0; i < NUM_VCS; i++)
414 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
415
/* Disregard all 16 long packet LUT entries by default */
417 for (i = 0; i < 16; i++)
418 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
419 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
420 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
421 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
422
423 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
424 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
425 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
426 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
427 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
428 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
429
430 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
431 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
432 base + CIO2_REG_INT_EN);
433
434 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
435 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
436 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
437 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
438 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
439 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
440 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
441 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
442 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
443 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
444
445 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
446 writel(CIO2_CGC_PRIM_TGE |
447 CIO2_CGC_SIDE_TGE |
448 CIO2_CGC_XOSC_TGE |
449 CIO2_CGC_D3I3_TGE |
450 CIO2_CGC_CSI2_INTERFRAME_TGE |
451 CIO2_CGC_CSI2_PORT_DCGE |
452 CIO2_CGC_SIDE_DCGE |
453 CIO2_CGC_PRIM_DCGE |
454 CIO2_CGC_ROSC_DCGE |
455 CIO2_CGC_XOSC_DCGE |
456 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
457 CIO2_CGC_CSI_CLKGATE_HOLDOFF
458 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
459 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
460 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
461 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
462 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
463 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
464 base + CIO2_REG_LTRVAL01);
465 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
466 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
467 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
468 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
469 base + CIO2_REG_LTRVAL23);
470
471 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
472 writel(0, base + CIO2_REG_CDMABA(i));
473 writel(0, base + CIO2_REG_CDMAC0(i));
474 writel(0, base + CIO2_REG_CDMAC1(i));
475 }
476
477
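/* Point the DMA channel at this queue's FBPT and configure it */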
478 writel(q->fbpt_bus_addr >> PAGE_SHIFT,
479 base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
480
481 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
482 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
483 CIO2_CDMAC0_DMA_INTR_ON_FE |
484 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
485 CIO2_CDMAC0_DMA_EN |
486 CIO2_CDMAC0_DMA_INTR_ON_FS |
487 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
488
489 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
490 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
491
492 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
493
494 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
495 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
496 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
497 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
498 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
499
500
501 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
502 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
503 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
504 writel(~0, base + CIO2_REG_INT_STS);
505
506
507 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
508 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
509
510 return 0;
511 }
512
513 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
514 {
515 void __iomem *base = cio2->base;
516 unsigned int i, maxloops = 1000;
517
518
519 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
520 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
521 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
522 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
523
524
525 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
526 do {
527 if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
528 CIO2_CDMAC0_DMA_HALTED)
529 break;
530 usleep_range(1000, 2000);
531 } while (--maxloops);
532 if (!maxloops)
533 dev_err(&cio2->pci_dev->dev,
534 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
535
536 for (i = 0; i < CIO2_NUM_PORTS; i++) {
537 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
538 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
539 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
540 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
541 }
542 }
543
544 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
545 {
546 struct device *dev = &cio2->pci_dev->dev;
547 struct cio2_queue *q = cio2->cur_queue;
548 int buffers_found = 0;
549 u64 ns = ktime_get_ns();
550
551 if (dma_chan >= CIO2_QUEUES) {
552 dev_err(dev, "bad DMA channel %i\n", dma_chan);
553 return;
554 }
555
556
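/*
 * Walk the FBPT starting at bufs_first and complete every entry whose
 * VALID bit the DMA engine has already cleared.
 */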
557 do {
558 struct cio2_fbpt_entry *const entry =
559 &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
560 struct cio2_buffer *b;
561
562 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
563 break;
564
565 b = q->bufs[q->bufs_first];
566 if (b) {
567 unsigned int bytes = entry[1].second_entry.num_of_bytes;
568
569 q->bufs[q->bufs_first] = NULL;
570 atomic_dec(&q->bufs_queued);
571 dev_dbg(&cio2->pci_dev->dev,
572 "buffer %i done\n", b->vbb.vb2_buf.index);
573
574 b->vbb.vb2_buf.timestamp = ns;
575 b->vbb.field = V4L2_FIELD_NONE;
576 b->vbb.sequence = atomic_read(&q->frame_sequence);
577 if (b->vbb.vb2_buf.planes[0].length != bytes)
578 dev_warn(dev, "buffer length is %d received %d\n",
579 b->vbb.vb2_buf.planes[0].length,
580 bytes);
581 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
582 }
583 atomic_inc(&q->frame_sequence);
584 cio2_fbpt_entry_init_dummy(cio2, entry);
585 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
586 buffers_found++;
587 } while (1);
588
589 if (buffers_found == 0)
590 dev_warn(&cio2->pci_dev->dev,
591 "no ready buffers found on DMA channel %u\n",
592 dma_chan);
593 }
594
595 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
596 {
/*
 * User space camera control algorithms need to know when reception of
 * a frame has begun; the frame sync event carries the best timing
 * information the hardware provides.
 */
602 struct v4l2_event event = {
603 .type = V4L2_EVENT_FRAME_SYNC,
604 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
605 };
606
607 v4l2_event_queue(q->subdev.devnode, &event);
608 }
609
610 static const char *const cio2_irq_errs[] = {
611 "single packet header error corrected",
612 "multiple packet header errors detected",
613 "payload checksum (CRC) error",
614 "fifo overflow",
615 "reserved short packet data type detected",
616 "reserved long packet data type detected",
617 "incomplete long packet detected",
618 "frame sync error",
619 "line sync error",
620 "DPHY start of transmission error",
621 "DPHY synchronization error",
622 "escape mode error",
623 "escape mode trigger event",
624 "escape mode ultra-low power state for data lane(s)",
625 "escape mode ultra-low power state exit for clock lane",
626 "inter-frame short packet discarded",
627 "inter-frame long packet discarded",
628 "non-matching Long Packet stalled",
629 };
630
631 static const char *const cio2_port_errs[] = {
632 "ECC recoverable",
633 "DPHY not recoverable",
634 "ECC not recoverable",
635 "CRC error",
636 "INTERFRAMEDATA",
637 "PKT2SHORT",
638 "PKT2LONG",
639 };
640
641 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
642 {
643 void __iomem *const base = cio2->base;
644 struct device *dev = &cio2->pci_dev->dev;
645
646 if (int_status & CIO2_INT_IOOE) {
/*
 * Interrupt on Output Error:
 * 1) SRAM is full and a frame start has been received, or
 * 2) the DMA has detected an invalid condition.
 */
652 u32 oe_status, oe_clear;
653
654 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
655 oe_status = oe_clear;
656
657 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
658 dev_err(dev, "DMA output error: 0x%x\n",
659 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
660 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
661 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
662 }
663 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
664 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
665 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
666 >> CIO2_INT_EXT_OE_OES_SHIFT);
667 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
668 }
669 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
670 if (oe_status)
671 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
672 oe_status);
673 int_status &= ~CIO2_INT_IOOE;
674 }
675
676 if (int_status & CIO2_INT_IOC_MASK) {
/* DMA IO done -- a frame is ready */
678 u32 clr = 0;
679 unsigned int d;
680
681 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
682 if (int_status & CIO2_INT_IOC(d)) {
683 clr |= CIO2_INT_IOC(d);
684 cio2_buffer_done(cio2, d);
685 }
686 int_status &= ~clr;
687 }
688
689 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
/* DMA IO starts or has reached the specified line */
691 u32 clr = 0;
692 unsigned int d;
693
694 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
695 if (int_status & CIO2_INT_IOS_IOLN(d)) {
696 clr |= CIO2_INT_IOS_IOLN(d);
697 if (d == CIO2_DMA_CHAN)
698 cio2_queue_event_sof(cio2,
699 cio2->cur_queue);
700 }
701 int_status &= ~clr;
702 }
703
704 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
/* CSI-2 receiver (error) interrupt */
706 u32 ie_status, ie_clear;
707 unsigned int port;
708
709 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
710 ie_status = ie_clear;
711
712 for (port = 0; port < CIO2_NUM_PORTS; port++) {
713 u32 port_status = (ie_status >> (port * 8)) & 0xff;
714 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
715 void __iomem *const csi_rx_base =
716 base + CIO2_REG_PIPE_BASE(port);
717 unsigned int i;
718
719 while (port_status & err_mask) {
720 i = ffs(port_status) - 1;
721 dev_err(dev, "port %i error %s\n",
722 port, cio2_port_errs[i]);
723 ie_status &= ~BIT(port * 8 + i);
724 port_status &= ~BIT(i);
725 }
726
727 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
728 u32 csi2_status, csi2_clear;
729
730 csi2_status = readl(csi_rx_base +
731 CIO2_REG_IRQCTRL_STATUS);
732 csi2_clear = csi2_status;
733 err_mask =
734 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
735
736 while (csi2_status & err_mask) {
737 i = ffs(csi2_status) - 1;
738 dev_err(dev,
739 "CSI-2 receiver port %i: %s\n",
740 port, cio2_irq_errs[i]);
741 csi2_status &= ~BIT(i);
742 }
743
744 writel(csi2_clear,
745 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
746 if (csi2_status)
747 dev_warn(dev,
748 "unknown CSI2 error 0x%x on port %i\n",
749 csi2_status, port);
750
751 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
752 }
753 }
754
755 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
756 if (ie_status)
757 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
758 ie_status);
759
760 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
761 }
762
763 if (int_status)
764 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
765 }
766
767 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
768 {
769 struct cio2_device *cio2 = cio2_ptr;
770 void __iomem *const base = cio2->base;
771 struct device *dev = &cio2->pci_dev->dev;
772 u32 int_status;
773
774 int_status = readl(base + CIO2_REG_INT_STS);
775 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
776 if (!int_status)
777 return IRQ_NONE;
778
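/* Clear and handle the asserted causes, then re-check until none remain */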
779 do {
780 writel(int_status, base + CIO2_REG_INT_STS);
781 cio2_irq_handle_once(cio2, int_status);
782 int_status = readl(base + CIO2_REG_INT_STS);
783 if (int_status)
784 dev_dbg(dev, "pending status 0x%x\n", int_status);
785 } while (int_status);
786
787 return IRQ_HANDLED;
788 }
789
/**** Videobuf2 interface ****/
791
792 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
793 enum vb2_buffer_state state)
794 {
795 unsigned int i;
796
797 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
798 if (q->bufs[i]) {
799 atomic_dec(&q->bufs_queued);
800 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
801 state);
802 }
803 }
804 }
805
806 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
807 unsigned int *num_buffers,
808 unsigned int *num_planes,
809 unsigned int sizes[],
810 struct device *alloc_devs[])
811 {
812 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
813 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
814 unsigned int i;
815
816 *num_planes = q->format.num_planes;
817
818 for (i = 0; i < *num_planes; ++i) {
819 sizes[i] = q->format.plane_fmt[i].sizeimage;
820 alloc_devs[i] = &cio2->pci_dev->dev;
821 }
822
823 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
824
/* Initialize the buffer queue */
826 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
827 q->bufs[i] = NULL;
828 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
829 }
830 atomic_set(&q->bufs_queued, 0);
831 q->bufs_first = 0;
832 q->bufs_next = 0;
833
834 return 0;
835 }
836
/* Called once after each buffer is allocated */
838 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
839 {
840 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
841 struct device *dev = &cio2->pci_dev->dev;
842 struct cio2_buffer *b =
843 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
844 static const unsigned int entries_per_page =
845 CIO2_PAGE_SIZE / sizeof(u32);
846 unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
847 unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
848 struct sg_table *sg;
849 struct sg_dma_page_iter sg_iter;
850 int i, j;
851
852 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
853 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
854 vb->planes[0].length);
855 return -ENOSPC;
856 }
857
858 memset(b->lop, 0, sizeof(b->lop));
859
860 for (i = 0; i < lops; i++) {
861 b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
862 &b->lop_bus_addr[i], GFP_KERNEL);
863 if (!b->lop[i])
864 goto fail;
865 }
866
/* Get the scatter-gather table for plane 0 */
868 sg = vb2_dma_sg_plane_desc(vb, 0);
869 if (!sg)
870 return -ENOMEM;
871
872 if (sg->nents && sg->sgl)
873 b->offset = sg->sgl->offset;
874
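/* Fill the LOPs with the DMA addresses of the buffer pages */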
875 i = j = 0;
876 for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
877 if (!pages--)
878 break;
879 b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
880 j++;
881 if (j == entries_per_page) {
882 i++;
883 j = 0;
884 }
885 }
886
887 b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
888 return 0;
889 fail:
890 for (i--; i >= 0; i--)
891 dma_free_coherent(dev, CIO2_PAGE_SIZE,
892 b->lop[i], b->lop_bus_addr[i]);
893 return -ENOMEM;
894 }
895
/* Transfer buffer ownership to cio2 */
897 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
898 {
899 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
900 struct cio2_queue *q =
901 container_of(vb->vb2_queue, struct cio2_queue, vbq);
902 struct cio2_buffer *b =
903 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
904 struct cio2_fbpt_entry *entry;
905 unsigned long flags;
906 unsigned int i, j, next = q->bufs_next;
907 int bufs_queued = atomic_inc_return(&q->bufs_queued);
908 u32 fbpt_rp;
909
910 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
911
/*
 * Queue this buffer to the CIO2 DMA engine, which may already be
 * running. Local interrupts are disabled while the FBPT read pointer
 * is sampled and the entry is filled, so buffer completion handling
 * cannot race with slot selection on this CPU. The DMA engine can
 * still advance at any time, so the entry it is working on (and the
 * one it may move to next) is skipped and the buffer is placed in the
 * following free slot.
 */
925 local_irq_save(flags);
926
927 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
928 >> CIO2_CDMARI_FBPT_RP_SHIFT)
929 & CIO2_CDMARI_FBPT_RP_MASK;
930
/*
 * fbpt_rp is the FBPT entry the DMA engine is currently working on,
 * but it may advance to the next entry at any time; assume it is
 * already there.
 */
936 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
937
938 if (bufs_queued <= 1 || fbpt_rp == next)
/* Buffers were drained */
940 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
941
942 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
/*
 * CIO2_MAX_BUFFERS FBPT slots are allocated circularly for the
 * hardware and user space can queue at most CIO2_MAX_BUFFERS buffers,
 * so a free slot necessarily exists whenever a buffer is queued.
 */
949 if (!q->bufs[next]) {
950 q->bufs[next] = b;
951 entry = &q->fbpt[next * CIO2_MAX_LOPS];
952 cio2_fbpt_entry_init_buf(cio2, b, entry);
953 local_irq_restore(flags);
954 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
955 for (j = 0; j < vb->num_planes; j++)
956 vb2_set_plane_payload(vb, j,
957 q->format.plane_fmt[j].sizeimage);
958 return;
959 }
960
961 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
962 next = (next + 1) % CIO2_MAX_BUFFERS;
963 }
964
965 local_irq_restore(flags);
966 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
967 atomic_dec(&q->bufs_queued);
968 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
969 }
970
/* Called when each buffer is freed */
972 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
973 {
974 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
975 struct cio2_buffer *b =
976 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
977 unsigned int i;
978
/* Free the LOP tables */
980 for (i = 0; i < CIO2_MAX_LOPS; i++) {
981 if (b->lop[i])
982 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
983 b->lop[i], b->lop_bus_addr[i]);
984 }
985 }
986
987 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
988 {
989 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
990 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
991 int r;
992
993 cio2->cur_queue = q;
994 atomic_set(&q->frame_sequence, 0);
995
996 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
997 if (r < 0) {
998 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
999 pm_runtime_put_noidle(&cio2->pci_dev->dev);
1000 return r;
1001 }
1002
1003 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
1004 if (r)
1005 goto fail_pipeline;
1006
1007 r = cio2_hw_init(cio2, q);
1008 if (r)
1009 goto fail_hw;
1010
/* Start streaming on the sensor */
1012 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1013 if (r)
1014 goto fail_csi2_subdev;
1015
1016 cio2->streaming = true;
1017
1018 return 0;
1019
1020 fail_csi2_subdev:
1021 cio2_hw_exit(cio2, q);
1022 fail_hw:
1023 media_pipeline_stop(&q->vdev.entity);
1024 fail_pipeline:
1025 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1026 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1027 pm_runtime_put(&cio2->pci_dev->dev);
1028
1029 return r;
1030 }
1031
1032 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1033 {
1034 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1035 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1036
1037 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1038 dev_err(&cio2->pci_dev->dev,
1039 "failed to stop sensor streaming\n");
1040
1041 cio2_hw_exit(cio2, q);
1042 synchronize_irq(cio2->pci_dev->irq);
1043 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1044 media_pipeline_stop(&q->vdev.entity);
1045 pm_runtime_put(&cio2->pci_dev->dev);
1046 cio2->streaming = false;
1047 }
1048
1049 static const struct vb2_ops cio2_vb2_ops = {
1050 .buf_init = cio2_vb2_buf_init,
1051 .buf_queue = cio2_vb2_buf_queue,
1052 .buf_cleanup = cio2_vb2_buf_cleanup,
1053 .queue_setup = cio2_vb2_queue_setup,
1054 .start_streaming = cio2_vb2_start_streaming,
1055 .stop_streaming = cio2_vb2_stop_streaming,
1056 .wait_prepare = vb2_ops_wait_prepare,
1057 .wait_finish = vb2_ops_wait_finish,
1058 };
1059
/**** V4L2 interface ****/
1061
1062 static int cio2_v4l2_querycap(struct file *file, void *fh,
1063 struct v4l2_capability *cap)
1064 {
1065 struct cio2_device *cio2 = video_drvdata(file);
1066
1067 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1068 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1069 snprintf(cap->bus_info, sizeof(cap->bus_info),
1070 "PCI:%s", pci_name(cio2->pci_dev));
1071
1072 return 0;
1073 }
1074
1075 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1076 struct v4l2_fmtdesc *f)
1077 {
1078 if (f->index >= ARRAY_SIZE(formats))
1079 return -EINVAL;
1080
1081 f->pixelformat = formats[f->index].fourcc;
1082
1083 return 0;
1084 }
1085
1086
1087 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1088 {
1089 struct cio2_queue *q = file_to_cio2_queue(file);
1090
1091 f->fmt.pix_mp = q->format;
1092
1093 return 0;
1094 }
1095
1096 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1097 {
1098 const struct ipu3_cio2_fmt *fmt;
1099 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1100
1101 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1102 if (!fmt)
1103 fmt = &formats[0];
1104
/* Clamp to the maximum supported frame size */
1106 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1107 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1108 if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
1109 mpix->height = CIO2_IMAGE_MAX_LENGTH;
1110
1111 mpix->num_planes = 1;
1112 mpix->pixelformat = fmt->fourcc;
1113 mpix->colorspace = V4L2_COLORSPACE_RAW;
1114 mpix->field = V4L2_FIELD_NONE;
1115 memset(mpix->reserved, 0, sizeof(mpix->reserved));
1116 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1117 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1118 mpix->height;
1119 memset(mpix->plane_fmt[0].reserved, 0,
1120 sizeof(mpix->plane_fmt[0].reserved));
1121
/* Use default colorimetry */
1123 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1124 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1125 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1126
1127 return 0;
1128 }
1129
1130 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1131 {
1132 struct cio2_queue *q = file_to_cio2_queue(file);
1133
1134 cio2_v4l2_try_fmt(file, fh, f);
1135 q->format = f->fmt.pix_mp;
1136
1137 return 0;
1138 }
1139
1140 static int
1141 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1142 {
1143 if (input->index > 0)
1144 return -EINVAL;
1145
1146 strscpy(input->name, "camera", sizeof(input->name));
1147 input->type = V4L2_INPUT_TYPE_CAMERA;
1148
1149 return 0;
1150 }
1151
1152 static int
1153 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1154 {
1155 *input = 0;
1156
1157 return 0;
1158 }
1159
1160 static int
1161 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1162 {
1163 return input == 0 ? 0 : -EINVAL;
1164 }
1165
1166 static const struct v4l2_file_operations cio2_v4l2_fops = {
1167 .owner = THIS_MODULE,
1168 .unlocked_ioctl = video_ioctl2,
1169 .open = v4l2_fh_open,
1170 .release = vb2_fop_release,
1171 .poll = vb2_fop_poll,
1172 .mmap = vb2_fop_mmap,
1173 };
1174
1175 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1176 .vidioc_querycap = cio2_v4l2_querycap,
1177 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1178 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1179 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1180 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1181 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1182 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1183 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1184 .vidioc_querybuf = vb2_ioctl_querybuf,
1185 .vidioc_qbuf = vb2_ioctl_qbuf,
1186 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1187 .vidioc_streamon = vb2_ioctl_streamon,
1188 .vidioc_streamoff = vb2_ioctl_streamoff,
1189 .vidioc_expbuf = vb2_ioctl_expbuf,
1190 .vidioc_enum_input = cio2_video_enum_input,
1191 .vidioc_g_input = cio2_video_g_input,
1192 .vidioc_s_input = cio2_video_s_input,
1193 };
1194
1195 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1196 struct v4l2_fh *fh,
1197 struct v4l2_event_subscription *sub)
1198 {
1199 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1200 return -EINVAL;
1201
/* Only id (line number) zero is accepted for now */
1203 if (sub->id != 0)
1204 return -EINVAL;
1205
1206 return v4l2_event_subscribe(fh, sub, 0, NULL);
1207 }
1208
1209 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1210 {
1211 struct v4l2_mbus_framefmt *format;
1212 const struct v4l2_mbus_framefmt fmt_default = {
1213 .width = 1936,
1214 .height = 1096,
1215 .code = formats[0].mbus_code,
1216 .field = V4L2_FIELD_NONE,
1217 .colorspace = V4L2_COLORSPACE_RAW,
1218 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1219 .quantization = V4L2_QUANTIZATION_DEFAULT,
1220 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1221 };
1222
/* Initialize the try format on the sink pad */
1224 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1225 *format = fmt_default;
1226
/* Use the same default on the source pad */
1228 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1229 *format = fmt_default;
1230
1231 return 0;
1232 }
1233
/*
 * cio2_subdev_get_fmt - handle the get_fmt pad operation
 * @sd: pointer to the v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to the v4l2 subdev format structure
 * Returns 0 on success or a negative error code.
 */
1241 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1242 struct v4l2_subdev_pad_config *cfg,
1243 struct v4l2_subdev_format *fmt)
1244 {
1245 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1246 struct v4l2_subdev_format format;
1247 int ret;
1248
1249 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1250 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1251 return 0;
1252 }
1253
1254 if (fmt->pad == CIO2_PAD_SINK) {
1255 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1256 ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
1257 &format);
1258
1259 if (ret)
1260 return ret;
1261
1262 q->subdev_fmt.colorspace = format.format.colorspace;
1263 q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
1264 q->subdev_fmt.quantization = format.format.quantization;
1265 q->subdev_fmt.xfer_func = format.format.xfer_func;
1266 }
1267
1268 fmt->format = q->subdev_fmt;
1269
1270 return 0;
1271 }
1272
/*
 * cio2_subdev_set_fmt - handle the set_fmt pad operation
 * @sd: pointer to the v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to the v4l2 subdev format structure
 * Returns 0 on success or a negative error code.
 */
1280 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1281 struct v4l2_subdev_pad_config *cfg,
1282 struct v4l2_subdev_format *fmt)
1283 {
1284 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1285
/*
 * Only allow setting the sink pad format; the source pad format
 * always propagates from the sink.
 */
1290 if (fmt->pad == CIO2_PAD_SOURCE)
1291 return cio2_subdev_get_fmt(sd, cfg, fmt);
1292
1293 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1294 *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
1295 } else {
1296
1297 q->subdev_fmt.width = fmt->format.width;
1298 q->subdev_fmt.height = fmt->format.height;
1299 q->subdev_fmt.code = fmt->format.code;
1300 fmt->format = q->subdev_fmt;
1301 }
1302
1303 return 0;
1304 }
1305
1306 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1307 struct v4l2_subdev_pad_config *cfg,
1308 struct v4l2_subdev_mbus_code_enum *code)
1309 {
1310 if (code->index >= ARRAY_SIZE(formats))
1311 return -EINVAL;
1312
1313 code->code = formats[code->index].mbus_code;
1314 return 0;
1315 }
1316
1317 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1318 struct v4l2_subdev_format *fmt)
1319 {
1320 if (is_media_entity_v4l2_subdev(pad->entity)) {
1321 struct v4l2_subdev *sd =
1322 media_entity_to_v4l2_subdev(pad->entity);
1323
1324 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1325 fmt->pad = pad->index;
1326 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1327 }
1328
1329 return -EINVAL;
1330 }
1331
1332 static int cio2_video_link_validate(struct media_link *link)
1333 {
1334 struct video_device *vd = container_of(link->sink->entity,
1335 struct video_device, entity);
1336 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1337 struct cio2_device *cio2 = video_get_drvdata(vd);
1338 struct v4l2_subdev_format source_fmt;
1339 int ret;
1340
1341 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1342 dev_info(&cio2->pci_dev->dev,
1343 "video node %s pad not connected\n", vd->name);
1344 return -ENOTCONN;
1345 }
1346
1347 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1348 if (ret < 0)
1349 return 0;
1350
1351 if (source_fmt.format.width != q->format.width ||
1352 source_fmt.format.height != q->format.height) {
1353 dev_err(&cio2->pci_dev->dev,
1354 "Wrong width or height %ux%u (%ux%u expected)\n",
1355 q->format.width, q->format.height,
1356 source_fmt.format.width, source_fmt.format.height);
1357 return -EINVAL;
1358 }
1359
1360 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1361 return -EINVAL;
1362
1363 return 0;
1364 }
1365
1366 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1367 .subscribe_event = cio2_subdev_subscribe_event,
1368 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1369 };
1370
1371 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1372 .open = cio2_subdev_open,
1373 };
1374
1375 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1376 .link_validate = v4l2_subdev_link_validate_default,
1377 .get_fmt = cio2_subdev_get_fmt,
1378 .set_fmt = cio2_subdev_set_fmt,
1379 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1380 };
1381
1382 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1383 .core = &cio2_subdev_core_ops,
1384 .pad = &cio2_subdev_pad_ops,
1385 };
1386
1387
1388
1389 struct sensor_async_subdev {
1390 struct v4l2_async_subdev asd;
1391 struct csi2_bus_info csi2;
1392 };
1393
/* The .bound() notifier callback when a match is found */
1395 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1396 struct v4l2_subdev *sd,
1397 struct v4l2_async_subdev *asd)
1398 {
1399 struct cio2_device *cio2 = container_of(notifier,
1400 struct cio2_device, notifier);
1401 struct sensor_async_subdev *s_asd = container_of(asd,
1402 struct sensor_async_subdev, asd);
1403 struct cio2_queue *q;
1404
1405 if (cio2->queue[s_asd->csi2.port].sensor)
1406 return -EBUSY;
1407
1408 q = &cio2->queue[s_asd->csi2.port];
1409
1410 q->csi2 = s_asd->csi2;
1411 q->sensor = sd;
1412 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1413
1414 return 0;
1415 }
1416
/* The .unbind() notifier callback when a subdev is removed */
1418 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1419 struct v4l2_subdev *sd,
1420 struct v4l2_async_subdev *asd)
1421 {
1422 struct cio2_device *cio2 = container_of(notifier,
1423 struct cio2_device, notifier);
1424 struct sensor_async_subdev *s_asd = container_of(asd,
1425 struct sensor_async_subdev, asd);
1426
1427 cio2->queue[s_asd->csi2.port].sensor = NULL;
1428 }
1429
/* .complete() is called once all matched subdevices have been bound */
1431 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1432 {
1433 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1434 notifier);
1435 struct sensor_async_subdev *s_asd;
1436 struct v4l2_async_subdev *asd;
1437 struct cio2_queue *q;
1438 unsigned int pad;
1439 int ret;
1440
1441 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1442 s_asd = container_of(asd, struct sensor_async_subdev, asd);
1443 q = &cio2->queue[s_asd->csi2.port];
1444
1445 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1446 if (q->sensor->entity.pads[pad].flags &
1447 MEDIA_PAD_FL_SOURCE)
1448 break;
1449
1450 if (pad == q->sensor->entity.num_pads) {
1451 dev_err(&cio2->pci_dev->dev,
1452 "failed to find src pad for %s\n",
1453 q->sensor->name);
1454 return -ENXIO;
1455 }
1456
1457 ret = media_create_pad_link(
1458 &q->sensor->entity, pad,
1459 &q->subdev.entity, CIO2_PAD_SINK,
1460 0);
1461 if (ret) {
1462 dev_err(&cio2->pci_dev->dev,
1463 "failed to create link for %s\n",
1464 q->sensor->name);
1465 return ret;
1466 }
1467 }
1468
1469 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1470 }
1471
1472 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1473 .bound = cio2_notifier_bound,
1474 .unbind = cio2_notifier_unbind,
1475 .complete = cio2_notifier_complete,
1476 };
1477
1478 static int cio2_parse_firmware(struct cio2_device *cio2)
1479 {
1480 unsigned int i;
1481 int ret;
1482
1483 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1484 struct v4l2_fwnode_endpoint vep = {
1485 .bus_type = V4L2_MBUS_CSI2_DPHY
1486 };
1487 struct sensor_async_subdev *s_asd = NULL;
1488 struct fwnode_handle *ep;
1489
1490 ep = fwnode_graph_get_endpoint_by_id(
1491 dev_fwnode(&cio2->pci_dev->dev), i, 0,
1492 FWNODE_GRAPH_ENDPOINT_NEXT);
1493
1494 if (!ep)
1495 continue;
1496
1497 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1498 if (ret)
1499 goto err_parse;
1500
1501 s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
1502 if (!s_asd) {
1503 ret = -ENOMEM;
1504 goto err_parse;
1505 }
1506
1507 s_asd->csi2.port = vep.base.port;
1508 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1509
1510 ret = v4l2_async_notifier_add_fwnode_remote_subdev(
1511 &cio2->notifier, ep, &s_asd->asd);
1512 if (ret)
1513 goto err_parse;
1514
1515 fwnode_handle_put(ep);
1516
1517 continue;
1518
1519 err_parse:
1520 fwnode_handle_put(ep);
1521 kfree(s_asd);
1522 return ret;
1523 }
1524
/*
 * Proceed even if no sensor endpoints were found, so that the device
 * can still be suspended.
 */
1529 cio2->notifier.ops = &cio2_async_ops;
1530 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1531 if (ret)
1532 dev_err(&cio2->pci_dev->dev,
1533 "failed to register async notifier : %d\n", ret);
1534
1535 return ret;
1536 }
1537
1538
1539 static const struct media_entity_operations cio2_media_ops = {
1540 .link_validate = v4l2_subdev_link_validate,
1541 };
1542
1543 static const struct media_entity_operations cio2_video_entity_ops = {
1544 .link_validate = cio2_video_link_validate,
1545 };
1546
1547 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1548 {
1549 static const u32 default_width = 1936;
1550 static const u32 default_height = 1096;
1551 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1552
1553 struct video_device *vdev = &q->vdev;
1554 struct vb2_queue *vbq = &q->vbq;
1555 struct v4l2_subdev *subdev = &q->subdev;
1556 struct v4l2_mbus_framefmt *fmt;
1557 int r;
1558
1559
1560 mutex_init(&q->lock);
1561
1562
1563 fmt = &q->subdev_fmt;
1564 fmt->width = default_width;
1565 fmt->height = default_height;
1566 fmt->code = dflt_fmt.mbus_code;
1567 fmt->field = V4L2_FIELD_NONE;
1568
1569 q->format.width = default_width;
1570 q->format.height = default_height;
1571 q->format.pixelformat = dflt_fmt.fourcc;
1572 q->format.colorspace = V4L2_COLORSPACE_RAW;
1573 q->format.field = V4L2_FIELD_NONE;
1574 q->format.num_planes = 1;
1575 q->format.plane_fmt[0].bytesperline =
1576 cio2_bytesperline(q->format.width);
1577 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1578 q->format.height;
1579
1580
1581 r = cio2_fbpt_init(cio2, q);
1582 if (r)
1583 goto fail_fbpt;
1584
1585
1586 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1587 MEDIA_PAD_FL_MUST_CONNECT;
1588 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1589 subdev->entity.ops = &cio2_media_ops;
1590 subdev->internal_ops = &cio2_subdev_internal_ops;
1591 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1592 if (r) {
1593 dev_err(&cio2->pci_dev->dev,
1594 "failed initialize subdev media entity (%d)\n", r);
1595 goto fail_subdev_media_entity;
1596 }
1597
1598 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1599 vdev->entity.ops = &cio2_video_entity_ops;
1600 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1601 if (r) {
1602 dev_err(&cio2->pci_dev->dev,
1603 "failed initialize videodev media entity (%d)\n", r);
1604 goto fail_vdev_media_entity;
1605 }
1606
1607
1608 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1609 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1610 subdev->owner = THIS_MODULE;
1611 snprintf(subdev->name, sizeof(subdev->name),
1612 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1613 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1614 v4l2_set_subdevdata(subdev, cio2);
1615 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1616 if (r) {
1617 dev_err(&cio2->pci_dev->dev,
1618 "failed initialize subdev (%d)\n", r);
1619 goto fail_subdev;
1620 }
1621
1622
1623 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1624 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1625 vbq->ops = &cio2_vb2_ops;
1626 vbq->mem_ops = &vb2_dma_sg_memops;
1627 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1628 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1629 vbq->min_buffers_needed = 1;
1630 vbq->drv_priv = cio2;
1631 vbq->lock = &q->lock;
1632 r = vb2_queue_init(vbq);
1633 if (r) {
1634 dev_err(&cio2->pci_dev->dev,
1635 "failed to initialize videobuf2 queue (%d)\n", r);
1636 goto fail_vbq;
1637 }
1638
1639
1640 snprintf(vdev->name, sizeof(vdev->name),
1641 "%s %td", CIO2_NAME, q - cio2->queue);
1642 vdev->release = video_device_release_empty;
1643 vdev->fops = &cio2_v4l2_fops;
1644 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1645 vdev->lock = &cio2->lock;
1646 vdev->v4l2_dev = &cio2->v4l2_dev;
1647 vdev->queue = &q->vbq;
1648 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1649 video_set_drvdata(vdev, cio2);
1650 r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
1651 if (r) {
1652 dev_err(&cio2->pci_dev->dev,
1653 "failed to register video device (%d)\n", r);
1654 goto fail_vdev;
1655 }
1656
1657
1658 r = media_create_pad_link(
1659 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1660 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1661 if (r)
1662 goto fail_link;
1663
1664 return 0;
1665
1666 fail_link:
1667 video_unregister_device(&q->vdev);
1668 fail_vdev:
1669 vb2_queue_release(vbq);
1670 fail_vbq:
1671 v4l2_device_unregister_subdev(subdev);
1672 fail_subdev:
1673 media_entity_cleanup(&vdev->entity);
1674 fail_vdev_media_entity:
1675 media_entity_cleanup(&subdev->entity);
1676 fail_subdev_media_entity:
1677 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1678 fail_fbpt:
1679 mutex_destroy(&q->lock);
1680
1681 return r;
1682 }
1683
1684 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1685 {
1686 video_unregister_device(&q->vdev);
1687 media_entity_cleanup(&q->vdev.entity);
1688 vb2_queue_release(&q->vbq);
1689 v4l2_device_unregister_subdev(&q->subdev);
1690 media_entity_cleanup(&q->subdev.entity);
1691 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1692 mutex_destroy(&q->lock);
1693 }
1694
1695 static int cio2_queues_init(struct cio2_device *cio2)
1696 {
1697 int i, r;
1698
1699 for (i = 0; i < CIO2_QUEUES; i++) {
1700 r = cio2_queue_init(cio2, &cio2->queue[i]);
1701 if (r)
1702 break;
1703 }
1704
1705 if (i == CIO2_QUEUES)
1706 return 0;
1707
1708 for (i--; i >= 0; i--)
1709 cio2_queue_exit(cio2, &cio2->queue[i]);
1710
1711 return r;
1712 }
1713
1714 static void cio2_queues_exit(struct cio2_device *cio2)
1715 {
1716 unsigned int i;
1717
1718 for (i = 0; i < CIO2_QUEUES; i++)
1719 cio2_queue_exit(cio2, &cio2->queue[i]);
1720 }
1721
/**** PCI interface ****/
1723
1724 static int cio2_pci_config_setup(struct pci_dev *dev)
1725 {
1726 u16 pci_command;
1727 int r = pci_enable_msi(dev);
1728
1729 if (r) {
1730 dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
1731 return r;
1732 }
1733
1734 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1735 pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1736 PCI_COMMAND_INTX_DISABLE;
1737 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1738
1739 return 0;
1740 }
1741
1742 static int cio2_pci_probe(struct pci_dev *pci_dev,
1743 const struct pci_device_id *id)
1744 {
1745 struct cio2_device *cio2;
1746 void __iomem *const *iomap;
1747 int r;
1748
1749 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1750 if (!cio2)
1751 return -ENOMEM;
1752 cio2->pci_dev = pci_dev;
1753
1754 r = pcim_enable_device(pci_dev);
1755 if (r) {
1756 dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1757 return r;
1758 }
1759
1760 dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1761 pci_dev->device, pci_dev->revision);
1762
1763 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1764 if (r) {
1765 dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1766 return -ENODEV;
1767 }
1768
1769 iomap = pcim_iomap_table(pci_dev);
1770 if (!iomap) {
1771 dev_err(&pci_dev->dev, "failed to iomap table\n");
1772 return -ENODEV;
1773 }
1774
1775 cio2->base = iomap[CIO2_PCI_BAR];
1776
1777 pci_set_drvdata(pci_dev, cio2);
1778
1779 pci_set_master(pci_dev);
1780
1781 r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1782 if (r) {
1783 dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1784 return -ENODEV;
1785 }
1786
1787 r = cio2_pci_config_setup(pci_dev);
1788 if (r)
1789 return -ENODEV;
1790
1791 r = cio2_fbpt_init_dummy(cio2);
1792 if (r)
1793 return r;
1794
1795 mutex_init(&cio2->lock);
1796
1797 cio2->media_dev.dev = &cio2->pci_dev->dev;
1798 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1799 sizeof(cio2->media_dev.model));
1800 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1801 "PCI:%s", pci_name(cio2->pci_dev));
1802 cio2->media_dev.hw_revision = 0;
1803
1804 media_device_init(&cio2->media_dev);
1805 r = media_device_register(&cio2->media_dev);
1806 if (r < 0)
1807 goto fail_mutex_destroy;
1808
1809 cio2->v4l2_dev.mdev = &cio2->media_dev;
1810 r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1811 if (r) {
1812 dev_err(&pci_dev->dev,
1813 "failed to register V4L2 device (%d)\n", r);
1814 goto fail_media_device_unregister;
1815 }
1816
1817 r = cio2_queues_init(cio2);
1818 if (r)
1819 goto fail_v4l2_device_unregister;
1820
1821 v4l2_async_notifier_init(&cio2->notifier);
1822
/* Register the async notifier for the sensor subdevices */
1824 r = cio2_parse_firmware(cio2);
1825 if (r)
1826 goto fail_clean_notifier;
1827
1828 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1829 IRQF_SHARED, CIO2_NAME, cio2);
1830 if (r) {
1831 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1832 goto fail_clean_notifier;
1833 }
1834
1835 pm_runtime_put_noidle(&pci_dev->dev);
1836 pm_runtime_allow(&pci_dev->dev);
1837
1838 return 0;
1839
1840 fail_clean_notifier:
1841 v4l2_async_notifier_unregister(&cio2->notifier);
1842 v4l2_async_notifier_cleanup(&cio2->notifier);
1843 cio2_queues_exit(cio2);
1844 fail_v4l2_device_unregister:
1845 v4l2_device_unregister(&cio2->v4l2_dev);
1846 fail_media_device_unregister:
1847 media_device_unregister(&cio2->media_dev);
1848 media_device_cleanup(&cio2->media_dev);
1849 fail_mutex_destroy:
1850 mutex_destroy(&cio2->lock);
1851 cio2_fbpt_exit_dummy(cio2);
1852
1853 return r;
1854 }
1855
1856 static void cio2_pci_remove(struct pci_dev *pci_dev)
1857 {
1858 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1859
1860 media_device_unregister(&cio2->media_dev);
1861 v4l2_async_notifier_unregister(&cio2->notifier);
1862 v4l2_async_notifier_cleanup(&cio2->notifier);
1863 cio2_queues_exit(cio2);
1864 cio2_fbpt_exit_dummy(cio2);
1865 v4l2_device_unregister(&cio2->v4l2_dev);
1866 media_device_cleanup(&cio2->media_dev);
1867 mutex_destroy(&cio2->lock);
1868 }
1869
1870 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1871 {
1872 struct pci_dev *pci_dev = to_pci_dev(dev);
1873 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1874 void __iomem *const base = cio2->base;
1875 u16 pm;
1876
1877 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1878 dev_dbg(dev, "cio2 runtime suspend.\n");
1879
1880 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1881 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1882 pm |= CIO2_PMCSR_D3;
1883 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1884
1885 return 0;
1886 }
1887
1888 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1889 {
1890 struct pci_dev *pci_dev = to_pci_dev(dev);
1891 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1892 void __iomem *const base = cio2->base;
1893 u16 pm;
1894
1895 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1896 dev_dbg(dev, "cio2 runtime resume.\n");
1897
1898 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1899 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1900 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1901
1902 return 0;
1903 }
1904
/*
 * Rotate a circular buffer of "elems" elements of "elem_size" bytes in
 * place so that the element at index "start" becomes the first one.
 */
1909 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1910 {
1911 struct {
1912 size_t begin, end;
1913 } arr[2] = {
1914 { 0, start - 1 },
1915 { start, elems - 1 },
1916 };
1917
1918 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1919
1920
1921 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1922 size_t size0, i;
1923
/*
 * Find how many entries can be moved into place on this iteration:
 * the smaller of the two remaining chunks.
 */
1928 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1929
1930
1931 for (i = 0; i < size0; i++) {
1932 u8 *d = ptr + elem_size * (arr[1].begin + i);
1933 u8 *s = ptr + elem_size * (arr[0].begin + i);
1934 size_t j;
1935
1936 for (j = 0; j < elem_size; j++)
1937 swap(d[j], s[j]);
1938 }
1939
1940 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1941
1942 arr[0].begin += size0;
1943 } else {
/*
 * The first chunk is in place; the elements swapped out of it now sit
 * at the start of the second chunk and become the new first chunk.
 */
1948 arr[0].begin = arr[1].begin;
1949 arr[0].end = arr[1].begin + size0 - 1;
1950 arr[1].begin += size0;
1951 }
1952 }
1953 }
1954
1955 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1956 {
1957 unsigned int i, j;
1958
1959 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1960 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1961 if (q->bufs[j])
1962 break;
1963
1964 if (i == CIO2_MAX_BUFFERS)
1965 return;
1966
1967 if (j) {
1968 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1969 CIO2_MAX_BUFFERS, j);
1970 arrange(q->bufs, sizeof(struct cio2_buffer *),
1971 CIO2_MAX_BUFFERS, j);
1972 }
1973
/*
 * The DMA engine clears the VALID bit when it consumes an FBPT entry.
 * Stopping the stream in the suspend callback can therefore leave some
 * entries invalid, and on resume the DMA would halt as soon as it met
 * one. Re-enable (set VALID on) every entry to avoid that.
 */
1981 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1982 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1983 }
1984
1985 static int __maybe_unused cio2_suspend(struct device *dev)
1986 {
1987 struct pci_dev *pci_dev = to_pci_dev(dev);
1988 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1989 struct cio2_queue *q = cio2->cur_queue;
1990
1991 dev_dbg(dev, "cio2 suspend\n");
1992 if (!cio2->streaming)
1993 return 0;
1994
/* Stop the stream */
1996 cio2_hw_exit(cio2, q);
1997 synchronize_irq(pci_dev->irq);
1998
1999 pm_runtime_force_suspend(dev);
2000
/*
 * On resume the hardware starts processing the FBPT from entry 0, so
 * move the still-queued buffers to the head of the FBPT before
 * suspending.
 */
2005 cio2_fbpt_rearrange(cio2, q);
2006 q->bufs_first = 0;
2007 q->bufs_next = 0;
2008
2009 return 0;
2010 }
2011
2012 static int __maybe_unused cio2_resume(struct device *dev)
2013 {
2014 struct cio2_device *cio2 = dev_get_drvdata(dev);
2015 int r = 0;
2016 struct cio2_queue *q = cio2->cur_queue;
2017
2018 dev_dbg(dev, "cio2 resume\n");
2019 if (!cio2->streaming)
2020 return 0;
2021
2022 r = pm_runtime_force_resume(&cio2->pci_dev->dev);
2023 if (r < 0) {
2024 dev_err(&cio2->pci_dev->dev,
2025 "failed to set power %d\n", r);
2026 return r;
2027 }
2028
2029 r = cio2_hw_init(cio2, q);
2030 if (r)
dev_err(dev, "failed to init cio2 hw\n");
2032
2033 return r;
2034 }
2035
2036 static const struct dev_pm_ops cio2_pm_ops = {
2037 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2038 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2039 };
2040
2041 static const struct pci_device_id cio2_pci_id_table[] = {
2042 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2043 { 0 }
2044 };
2045
2046 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2047
2048 static struct pci_driver cio2_pci_driver = {
2049 .name = CIO2_NAME,
2050 .id_table = cio2_pci_id_table,
2051 .probe = cio2_pci_probe,
2052 .remove = cio2_pci_remove,
2053 .driver = {
2054 .pm = &cio2_pm_ops,
2055 },
2056 };
2057
2058 module_pci_driver(cio2_pci_driver);
2059
2060 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2061 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2062 MODULE_AUTHOR("Jian Xu Zheng");
2063 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2064 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2065 MODULE_LICENSE("GPL v2");
2066 MODULE_DESCRIPTION("IPU3 CIO2 driver");