1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
16 */
17
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
29
30 #include "cx23885.h"
31 #include "cimax2.h"
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
37
38 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
39 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(CX23885_VERSION);
42
43 static unsigned int debug;
44 module_param(debug, int, 0644);
45 MODULE_PARM_DESC(debug, "enable debug messages");
46
47 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
48 module_param_array(card, int, NULL, 0444);
49 MODULE_PARM_DESC(card, "card type");
50
51 #define dprintk(level, fmt, arg...)\
52 do { if (debug >= level)\
53 printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
54 } while (0)
55
56 static unsigned int cx23885_devcount;
57
58 #define NO_SYNC_LINE (-1U)
59
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
62 * CX23887 Assumptions
63 * 1 line = 16 bytes of CDT
64 * cmds size = 80
65 * cdt size = 16 * linesize
66 * iqsize = 64
67 * maxlines = 6
68 *
69 * Address Space:
70 * 0x00000000 0x00008fff FIFO clusters
71 * 0x00010000 0x000104af Channel Management Data Structures
72 * 0x000104b0 0x000104ff Free
73 * 0x00010500 0x000108bf 15 channels * iqsize
74 * 0x000108c0 0x000108ff Free
75 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76 * 15 channels * (iqsize + (maxlines * linesize))
77 * 0x00010ea0 0x00010xxx Free
78 */
79
80 static struct sram_channel cx23885_sram_channels[] = {
81 [SRAM_CH01] = {
82 .name = "VID A",
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
85 .cdt = 0x104c0,
86 .fifo_start = 0x40,
87 .fifo_size = 0x2800,
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
92 },
93 [SRAM_CH02] = {
94 .name = "ch2",
95 .cmds_start = 0x0,
96 .ctrl_start = 0x0,
97 .cdt = 0x0,
98 .fifo_start = 0x0,
99 .fifo_size = 0x0,
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
104 },
105 [SRAM_CH03] = {
106 .name = "TS1 B",
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
109 .cdt = 0x10580,
110 .fifo_start = 0x5000,
111 .fifo_size = 0x1000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
116 },
117 [SRAM_CH04] = {
118 .name = "ch4",
119 .cmds_start = 0x0,
120 .ctrl_start = 0x0,
121 .cdt = 0x0,
122 .fifo_start = 0x0,
123 .fifo_size = 0x0,
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
128 },
129 [SRAM_CH05] = {
130 .name = "ch5",
131 .cmds_start = 0x0,
132 .ctrl_start = 0x0,
133 .cdt = 0x0,
134 .fifo_start = 0x0,
135 .fifo_size = 0x0,
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
140 },
141 [SRAM_CH06] = {
142 .name = "TS2 C",
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
145 .cdt = 0x105e0,
146 .fifo_start = 0x6000,
147 .fifo_size = 0x1000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
152 },
153 [SRAM_CH07] = {
154 .name = "TV Audio",
155 .cmds_start = 0x10190,
156 .ctrl_start = 0x10480,
157 .cdt = 0x10a00,
158 .fifo_start = 0x7000,
159 .fifo_size = 0x1000,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
164 },
165 [SRAM_CH08] = {
166 .name = "ch8",
167 .cmds_start = 0x0,
168 .ctrl_start = 0x0,
169 .cdt = 0x0,
170 .fifo_start = 0x0,
171 .fifo_size = 0x0,
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
176 },
177 [SRAM_CH09] = {
178 .name = "ch9",
179 .cmds_start = 0x0,
180 .ctrl_start = 0x0,
181 .cdt = 0x0,
182 .fifo_start = 0x0,
183 .fifo_size = 0x0,
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
188 },
189 };
190
191 static struct sram_channel cx23887_sram_channels[] = {
192 [SRAM_CH01] = {
193 .name = "VID A",
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
196 .cdt = 0x107b0,
197 .fifo_start = 0x40,
198 .fifo_size = 0x2800,
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
203 },
204 [SRAM_CH02] = {
205 .name = "VID A (VBI)",
206 .cmds_start = 0x10050,
207 .ctrl_start = 0x105F0,
208 .cdt = 0x10810,
209 .fifo_start = 0x3000,
210 .fifo_size = 0x1000,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
215 },
216 [SRAM_CH03] = {
217 .name = "TS1 B",
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
220 .cdt = 0x10870,
221 .fifo_start = 0x5000,
222 .fifo_size = 0x1000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
227 },
228 [SRAM_CH04] = {
229 .name = "ch4",
230 .cmds_start = 0x0,
231 .ctrl_start = 0x0,
232 .cdt = 0x0,
233 .fifo_start = 0x0,
234 .fifo_size = 0x0,
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
239 },
240 [SRAM_CH05] = {
241 .name = "ch5",
242 .cmds_start = 0x0,
243 .ctrl_start = 0x0,
244 .cdt = 0x0,
245 .fifo_start = 0x0,
246 .fifo_size = 0x0,
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
251 },
252 [SRAM_CH06] = {
253 .name = "TS2 C",
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
256 .cdt = 0x108d0,
257 .fifo_start = 0x6000,
258 .fifo_size = 0x1000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
263 },
264 [SRAM_CH07] = {
265 .name = "TV Audio",
266 .cmds_start = 0x10190,
267 .ctrl_start = 0x106B0,
268 .cdt = 0x10930,
269 .fifo_start = 0x7000,
270 .fifo_size = 0x1000,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
275 },
276 [SRAM_CH08] = {
277 .name = "ch8",
278 .cmds_start = 0x0,
279 .ctrl_start = 0x0,
280 .cdt = 0x0,
281 .fifo_start = 0x0,
282 .fifo_size = 0x0,
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
287 },
288 [SRAM_CH09] = {
289 .name = "ch9",
290 .cmds_start = 0x0,
291 .ctrl_start = 0x0,
292 .cdt = 0x0,
293 .fifo_start = 0x0,
294 .fifo_size = 0x0,
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
299 },
300 };
301
/* Record 'mask' bits as driver-owned PCI interrupt sources without
 * touching the hardware enable register. */
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask |= mask;
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
311
/* Record 'mask' bits as driver-owned and enable them in the PCI
 * interrupt mask register, atomically w.r.t. other mask users. */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
322
/* Enable the subset of 'mask' that the driver has registered ownership
 * of via dev->pci_irqmask; bits outside that set are ignored. */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;
	u32 enable;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	enable = mask & dev->pci_irqmask;
	if (enable)
		cx_set(PCI_INT_MSK, enable);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
335
/* Enable every interrupt source the driver owns; cx23885_irq_enable()
 * masks the all-ones value against dev->pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
340
/* Disable 'mask' bits in the PCI interrupt mask register; the
 * driver-ownership record in dev->pci_irqmask is left untouched. */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
350
/* Disable every PCI interrupt source (hardware mask only; ownership
 * bits in dev->pci_irqmask are preserved). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
355
/* Drop driver ownership of 'mask' bits and disable them in hardware. */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
366
cx23885_irq_get_mask(struct cx23885_dev * dev)367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368 {
369 u32 v;
370 unsigned long flags;
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373 v = cx_read(PCI_INT_MSK);
374
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 return v;
377 }
378
/*
 * Pretty-print one RISC instruction word to the console and return the
 * number of 32-bit words the instruction occupies (1 when the opcode is
 * unknown), so callers can step through a RISC program word by word.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* Opcode mnemonics, indexed by the top nibble of the word. */
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* Instruction length in 32-bit words, per opcode (0 => unknown). */
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* Names for flag bits 12..27, lowest bit first. */
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	/* Print set flag bits, most significant first. */
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
418
/*
 * Complete the oldest buffer on a TS port's active queue: timestamp it,
 * assign the next sequence number, unlink it, and hand it back to
 * videobuf2 as DONE.  'count' is a hardware counter value used only for
 * debug output.  Does nothing if the queue is empty.
 *
 * NOTE(review): the queue is manipulated without taking a lock here;
 * presumably callers hold port->slock — verify at the call sites.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	buf->vb.v4l2_buf.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
437
/*
 * cx23885_sram_channel_setup() - program one DMA channel's SRAM state
 * @dev:  bridge device
 * @ch:   SRAM channel description (addresses and register offsets)
 * @bpl:  bytes per line; rounded up here to an 8-byte multiple
 * @risc: bus address of the RISC program to install as the initial PC
 *
 * A channel whose cmds_start is 0 is treated as unused: its DMA
 * pointer/count registers are cleared and nothing else is written.
 * Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;	/* CDT is sized for at most 6 lines */
	BUG_ON(lines < 2);

	/* Small RISC stub written at SRAM address 8; it becomes the
	 * program entry point when ch->jumponly is set (cmds_start+0
	 * is pointed at address 8 below). */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line, first word
	 * holding the line's FIFO start address */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial RISC PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
509
/*
 * Dump a DMA channel's state to the kernel log for debugging: the CMDS
 * block fields, the four RISC instruction slots that follow them, the
 * instruction queue contents (decoded), and the DMA pointer/count
 * registers.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* Field names for the first 14 words of the CMDS block. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* Decode the four RISC words stored after the named fields. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* Walk the 64-byte instruction queue, stepping by the decoded
	 * instruction length and printing trailing argument words. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
571
/*
 * Disassemble a RISC program held in host memory to the kernel log,
 * stepping by each instruction's decoded length and stopping at the
 * first bare RISC_JUMP word (end of program).
 */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		/* Print the instruction's argument words raw. */
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
590
/*
 * Quiesce the bridge: stop the RISC controller, all DMA engines (IR,
 * video, audio), the serial port, and then mask every interrupt source.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
620
/*
 * Bring the bridge to a known state: quiesce it, acknowledge all
 * pending interrupt status bits, then (after a settle delay) program
 * every SRAM DMA channel for its expected line size and set up GPIOs.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Writing 1s clears latched interrupt status. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	/* Preserve only the top bit of CLK_DELAY. */
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	mdelay(100);

	/* Video A: 720 pixels * 4 bytes; TS ports: 4 TS packets of 188
	 * bytes per line; unused channels get a nominal 128. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
653
654
cx23885_pci_quirks(struct cx23885_dev * dev)655 static int cx23885_pci_quirks(struct cx23885_dev *dev)
656 {
657 dprintk(1, "%s()\n", __func__);
658
659 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
660 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
661 * occur on the cx23887 bridge.
662 */
663 if (dev->bridge == CX23885_BRIDGE_885)
664 cx_clear(RDR_TLCTL0, 1 << 4);
665
666 return 0;
667 }
668
get_resources(struct cx23885_dev * dev)669 static int get_resources(struct cx23885_dev *dev)
670 {
671 if (request_mem_region(pci_resource_start(dev->pci, 0),
672 pci_resource_len(dev->pci, 0),
673 dev->name))
674 return 0;
675
676 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
677 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
678
679 return -EBUSY;
680 }
681
/*
 * cx23885_init_tsport() - initialise software state for a TS port
 * @dev:    bridge device
 * @port:   TS port structure to fill in
 * @portno: hardware port number; only 1 (VID_B) and 2 (VID_C) are
 *          valid, anything else hits BUG()
 *
 * Sets the common DMA/interrupt default values, initialises the lock,
 * queue and frontend list, then maps the port number to its register
 * set, SRAM channel and PCI interrupt bit.  Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 is the MPEG TS sync byte; 188 the TS packet length. */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		/* NOTE(review): no source-select register for VID_C. */
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
752
/*
 * Map the low byte of RDR_CFG2 (plus, for some values, the PCI device
 * ID) to a hardware revision code stored in dev->hwrevision, and log
 * the result.
 *
 * NOTE(review): in the default (unknown) case dev->hwrevision is not
 * assigned by this function, so the value printed is whatever it held
 * beforehand (typically 0 on first probe) — confirm that is intended.
 */
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
		       __func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}
805
806 /* Find the first v4l2_subdev member of the group id in hw */
cx23885_find_hw(struct cx23885_dev * dev,u32 hw)807 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808 {
809 struct v4l2_subdev *result = NULL;
810 struct v4l2_subdev *sd;
811
812 spin_lock(&dev->v4l2_dev.lock);
813 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814 if (sd->grp_id == hw) {
815 result = sd;
816 break;
817 }
818 }
819 spin_unlock(&dev->v4l2_dev.lock);
820 return result;
821 }
822
/*
 * cx23885_dev_setup() - one-time bring-up of a probed bridge device
 *
 * Initialises locks and refcount, identifies the bridge variant from
 * the PCI device ID, resolves the board type (module parameter first,
 * then PCI subsystem ID table), claims and maps BAR0, resets the
 * hardware, registers the three I2C masters, and registers the analog
 * video / DVB / encoder sub-drivers the board configuration calls for.
 *
 * Returns 0 on success, -ENODEV if the MMIO region cannot be claimed.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod override first, then subsystem-ID match */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specific a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	/* Claim ownership of the core interrupt sources (bits 8..12). */
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
				"video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
				"%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1027
/*
 * Tear down a device: release the BAR0 MMIO reservation, and — once the
 * last reference is dropped — unregister the analog/DVB/417 sub-drivers
 * and I2C buses, then unmap the registers.
 *
 * NOTE(review): the mem region is released on every call, before the
 * refcount check gates the rest — confirm callers invoke this at most
 * once per device with an outstanding region reservation.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* Unregister I2C masters in reverse registration order. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1057
/*
 * cx23885_risc_field() - emit RISC write instructions for one field
 * @rp:        write cursor into the RISC program buffer
 * @sglist:    scatterlist describing the destination pages
 * @offset:    byte offset into the scatterlist where the field starts
 * @sync_line: line to resync on, or NO_SYNC_LINE to skip the resync op
 * @bpl:       bytes per line to transfer
 * @padding:   bytes to skip in the destination after each line
 * @lines:     number of lines in the field
 * @lpi:       lines per IRQ; every lpi-th line raises IRQ1 and bumps
 *             the counter (0 disables this)
 * @jump:      when true, reserve a leading 3-word jump instruction slot
 *             (target filled with 0 here)
 *
 * Each scanline becomes one RISC_WRITE (SOL|EOL) when it fits in the
 * current scatterlist chunk, or a SOL write + middle writes + EOL write
 * when it spans chunks.  Returns the advanced write cursor.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Advance to the scatterlist chunk containing 'offset'. */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			/* SOL write for the tail of the current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			/* full-chunk middle writes, no SOL/EOL flags */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			/* EOL write for the remainder */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1124
/* Build the RISC DMA program for a video buffer covering up to two
 * interleaved fields.  A @top_offset/@bottom_offset of UNSET skips that
 * field.  Returns 0 on success or -ENOMEM if the consistent DMA
 * allocation for the program fails.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12; /* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	/* top field resyncs on line 0, bottom on 0x200; only the first
	 * field emitted carries the leading placeholder jump */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1166
/* Build the RISC DMA program for a transport-stream (non-interleaved)
 * data buffer: no sync instruction, no inter-line padding.  When @lpi
 * is zero the program starts with a placeholder jump (patched at queue
 * time); otherwise IRQ1 fires every @lpi lines instead.  Returns 0 or
 * -ENOMEM.
 */
int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync. First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12; /* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1199
/* Build the RISC DMA program for a VBI buffer.  Structurally identical
 * to cx23885_risc_buffer(); kept separate for the VBI-specific sync
 * comment below.  Returns 0 or -ENOMEM.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			   struct scatterlist *sglist, unsigned int top_offset,
			   unsigned int bottom_offset, unsigned int bpl,
			   unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12; /* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload
	 * NOTE(review): the code below passes sync lines 0/0x200, not 6;
	 * this comment may be stale - confirm against the hardware docs. */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1246
1247
/* Free the consistent DMA memory holding a buffer's RISC program.
 * The BUG_ON documents that this must only be called from process
 * context, never from an interrupt.
 */
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
1255
/* Debug helper: dump the interrupt-, DMA- and TS-related registers for
 * one transport-stream port at debug level 1.  Read-only; no hardware
 * state is modified.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* src_sel only exists on ports that support source selection */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1307
cx23885_start_dma(struct cx23885_tsport * port,struct cx23885_dmaqueue * q,struct cx23885_buffer * buf)1308 int cx23885_start_dma(struct cx23885_tsport *port,
1309 struct cx23885_dmaqueue *q,
1310 struct cx23885_buffer *buf)
1311 {
1312 struct cx23885_dev *dev = port->dev;
1313 u32 reg;
1314
1315 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1316 dev->width, dev->height, dev->field);
1317
1318 /* Stop the fifo and risc engine for this port */
1319 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1320
1321 /* setup fifo + format */
1322 cx23885_sram_channel_setup(dev,
1323 &dev->sram_channels[port->sram_chno],
1324 port->ts_packet_size, buf->risc.dma);
1325 if (debug > 5) {
1326 cx23885_sram_channel_dump(dev,
1327 &dev->sram_channels[port->sram_chno]);
1328 cx23885_risc_disasm(port, &buf->risc);
1329 }
1330
1331 /* write TS length to chip */
1332 cx_write(port->reg_lngth, port->ts_packet_size);
1333
1334 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1335 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1336 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1337 __func__,
1338 cx23885_boards[dev->board].portb,
1339 cx23885_boards[dev->board].portc);
1340 return -EINVAL;
1341 }
1342
1343 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1344 cx23885_av_clk(dev, 0);
1345
1346 udelay(100);
1347
1348 /* If the port supports SRC SELECT, configure it */
1349 if (port->reg_src_sel)
1350 cx_write(port->reg_src_sel, port->src_sel_val);
1351
1352 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1353 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1354 cx_write(port->reg_vld_misc, port->vld_misc_val);
1355 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1356 udelay(100);
1357
1358 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1359 /* reset counter to zero */
1360 cx_write(port->reg_gpcnt_ctl, 3);
1361 q->count = 0;
1362
1363 /* Set VIDB pins to input */
1364 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1365 reg = cx_read(PAD_CTRL);
1366 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1367 cx_write(PAD_CTRL, reg);
1368 }
1369
1370 /* Set VIDC pins to input */
1371 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1372 reg = cx_read(PAD_CTRL);
1373 reg &= ~0x4; /* Clear TS2_SOP_OE */
1374 cx_write(PAD_CTRL, reg);
1375 }
1376
1377 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1378
1379 reg = cx_read(PAD_CTRL);
1380 reg = reg & ~0x1; /* Clear TS1_OE */
1381
1382 /* FIXME, bit 2 writing here is questionable */
1383 /* set TS1_SOP_OE and TS1_OE_HI */
1384 reg = reg | 0xa;
1385 cx_write(PAD_CTRL, reg);
1386
1387 /* FIXME and these two registers should be documented. */
1388 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1389 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1390 }
1391
1392 switch (dev->bridge) {
1393 case CX23885_BRIDGE_885:
1394 case CX23885_BRIDGE_887:
1395 case CX23885_BRIDGE_888:
1396 /* enable irqs */
1397 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1398 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1399 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1400 cx23885_irq_add(dev, port->pci_irqmask);
1401 cx23885_irq_enable_all(dev);
1402 break;
1403 default:
1404 BUG();
1405 }
1406
1407 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1408
1409 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1410 cx23885_av_clk(dev, 1);
1411
1412 if (debug > 4)
1413 cx23885_tsport_reg_dump(port);
1414
1415 return 0;
1416 }
1417
cx23885_stop_dma(struct cx23885_tsport * port)1418 static int cx23885_stop_dma(struct cx23885_tsport *port)
1419 {
1420 struct cx23885_dev *dev = port->dev;
1421 u32 reg;
1422
1423 dprintk(1, "%s()\n", __func__);
1424
1425 /* Stop interrupts and DMA */
1426 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1427 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1428
1429 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1430
1431 reg = cx_read(PAD_CTRL);
1432
1433 /* Set TS1_OE */
1434 reg = reg | 0x1;
1435
1436 /* clear TS1_SOP_OE and TS1_OE_HI */
1437 reg = reg & ~0xa;
1438 cx_write(PAD_CTRL, reg);
1439 cx_write(port->reg_src_sel, 0);
1440 cx_write(port->reg_gen_ctrl, 8);
1441
1442 }
1443
1444 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1445 cx23885_av_clk(dev, 0);
1446
1447 return 0;
1448 }
1449
1450 /* ------------------------------------------------------------------ */
1451
cx23885_buf_prepare(struct cx23885_buffer * buf,struct cx23885_tsport * port)1452 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1453 {
1454 struct cx23885_dev *dev = port->dev;
1455 int size = port->ts_packet_size * port->ts_packet_count;
1456 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
1457
1458 dprintk(1, "%s: %p\n", __func__, buf);
1459 if (vb2_plane_size(&buf->vb, 0) < size)
1460 return -EINVAL;
1461 vb2_set_plane_payload(&buf->vb, 0, size);
1462
1463 cx23885_risc_databuffer(dev->pci, &buf->risc,
1464 sgt->sgl,
1465 port->ts_packet_size, port->ts_packet_count, 0);
1466 return 0;
1467 }
1468
1469 /*
1470 * The risc program for each buffer works as follows: it starts with a simple
1471 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1472 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1473 * the initial JUMP).
1474 *
1475 * This is the risc program of the first buffer to be queued if the active list
1476 * is empty and it just keeps DMAing this buffer without generating any
1477 * interrupts.
1478 *
1479 * If a new buffer is added then the initial JUMP in the code for that buffer
1480 * will generate an interrupt which signals that the previous buffer has been
1481 * DMAed successfully and that it can be returned to userspace.
1482 *
1483 * It also sets the final jump of the previous buffer to the start of the new
1484 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1485 * atomic u32 write, so there is no race condition.
1486 *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
1489 */
/* Add a buffer to the port's active queue, chaining its RISC program
 * behind the previous buffer's (see the block comment above for the
 * overall scheme).  Takes dev->slock to protect the active list.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	/* Initial jump targets dma + 12, i.e. skips over itself (NOP);
	 * the final jump loops back to the same point so a lone buffer
	 * keeps DMAing without interrupting. */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.v4l2_buf.index, __func__);
	} else {
		/* Arm the initial jump to raise IRQ1, which signals
		 * completion of the buffer that precedes us */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		/* Retarget the previous buffer's final jump at our
		 * program - a single atomic u32 write, so no race */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.v4l2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1518
1519 /* ----------------------------------------------------------- */
1520
do_cancel_buffers(struct cx23885_tsport * port,char * reason)1521 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1522 {
1523 struct cx23885_dev *dev = port->dev;
1524 struct cx23885_dmaqueue *q = &port->mpegq;
1525 struct cx23885_buffer *buf;
1526 unsigned long flags;
1527
1528 spin_lock_irqsave(&port->slock, flags);
1529 while (!list_empty(&q->active)) {
1530 buf = list_entry(q->active.next, struct cx23885_buffer,
1531 queue);
1532 list_del(&buf->queue);
1533 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
1534 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1535 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
1536 }
1537 spin_unlock_irqrestore(&port->slock, flags);
1538 }
1539
/* Stop DMA on the port, then fail back every buffer still queued. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1548
/* Service a 417 MPEG-encoder interrupt (VID_B status bits).
 *
 * On any bad-packet / opcode / sync / overflow error the port's DMA is
 * halted, the SRAM channel is dumped, and the encoder is re-checked.
 * On a normal RISC IRQ1 the completed buffer is handed back through
 * cx23885_wakeup().  Any nonzero status is acknowledged by writing it
 * back to the port's interrupt status register.  Returns nonzero when
 * the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		/* Decode the individual error bits for debugging */
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* Halt DMA on this port and inspect the encoder */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Acknowledge everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1604
/* Service a DVB transport-stream port interrupt.
 *
 * On opcode / bad-packet / sync / overflow errors the port DMA is
 * halted and the SRAM channel dumped; on a normal RISC IRQ1 the
 * completed buffer is returned via cx23885_wakeup().  Any nonzero
 * status is acknowledged.  Returns nonzero when handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		/* Decode the individual error bits for debugging */
		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* Halt DMA and dump the SRAM channel state */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Acknowledge everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1655
/* Top-level (shared) interrupt handler.
 *
 * Reads the PCI, video (VID_A), audio, TS1 (VID_B) and TS2 (VID_C)
 * status registers, dispatches to the per-function handlers, and
 * acknowledges PCI_INT_STAT only when something was handled.  AV-core
 * interrupts are masked and deferred to a workqueue since servicing
 * them requires i2c access.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Nothing pending for us - possibly another device on the
	 * shared interrupt line */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Debug decode of the individual PCI-level status bits */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot interrupts arrive as GPIO interrupts on NetUP boards */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* Per-port dispatch: DVB and encoder use different handlers */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* IR interrupts are serviced synchronously by the IR subdev */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core servicing is deferred: mask it here, the work handler
	 * re-enables it when done */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1812
cx23885_v4l2_dev_notify(struct v4l2_subdev * sd,unsigned int notification,void * arg)1813 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1814 unsigned int notification, void *arg)
1815 {
1816 struct cx23885_dev *dev;
1817
1818 if (sd == NULL)
1819 return;
1820
1821 dev = to_cx23885(sd->v4l2_dev);
1822
1823 switch (notification) {
1824 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1825 if (sd == dev->sd_ir)
1826 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1827 break;
1828 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1829 if (sd == dev->sd_ir)
1830 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1831 break;
1832 }
1833 }
1834
cx23885_v4l2_dev_notify_init(struct cx23885_dev * dev)1835 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1836 {
1837 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1838 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1839 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1840 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1841 }
1842
encoder_on_portb(struct cx23885_dev * dev)1843 static inline int encoder_on_portb(struct cx23885_dev *dev)
1844 {
1845 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1846 }
1847
encoder_on_portc(struct cx23885_dev * dev)1848 static inline int encoder_on_portc(struct cx23885_dev *dev)
1849 {
1850 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1851 }
1852
/* Mask represents 32 different GPIOs; the GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
/* Drive the GPIOs in @mask high.  Bits 2-0 live in GP0_IO on the
 * bridge; bits 18-3 in MC417_RWD on the cx23417 host bus (shared with
 * the encoder, hence the warning); bits 23-19 (cx25840 core) are not
 * implemented.
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Setting GPIO on encoder ports\n",
				dev->name);
		/* MC417_RWD holds bits 18-3, shifted down by 3 */
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
1882
/* Drive the GPIOs in @mask low.  Same register split as
 * cx23885_gpio_set(): bits 2-0 in GP0_IO, bits 18-3 in MC417_RWD,
 * bits 23-19 unimplemented.
 */
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		/* MC417_RWD holds bits 18-3, shifted down by 3 */
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
1900
/* Read back the GPIOs selected by @mask.
 *
 * NOTE(review): unlike set/clear, this returns after the FIRST matching
 * register group - a mask spanning both GP0_IO and MC417_RWD yields the
 * GP0_IO bits only.  Confirm whether any caller depends on mixed masks.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		/* GP0_IO input levels are in bits 10-8 */
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		/* MC417_RWD holds bits 18-3; shift back up to GPIO order */
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1920
/* Configure the direction of the GPIOs in @mask: output when
 * @asoutput is nonzero, input otherwise.  GP0_IO direction bits are in
 * the upper half-word (hence the << 16); MC417_OEN covers bits 18-3.
 */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);

	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}
1944
cx23885_initdev(struct pci_dev * pci_dev,const struct pci_device_id * pci_id)1945 static int cx23885_initdev(struct pci_dev *pci_dev,
1946 const struct pci_device_id *pci_id)
1947 {
1948 struct cx23885_dev *dev;
1949 struct v4l2_ctrl_handler *hdl;
1950 int err;
1951
1952 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1953 if (NULL == dev)
1954 return -ENOMEM;
1955
1956 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1957 if (err < 0)
1958 goto fail_free;
1959
1960 hdl = &dev->ctrl_handler;
1961 v4l2_ctrl_handler_init(hdl, 6);
1962 if (hdl->error) {
1963 err = hdl->error;
1964 goto fail_ctrl;
1965 }
1966 dev->v4l2_dev.ctrl_handler = hdl;
1967
1968 /* Prepare to handle notifications from subdevices */
1969 cx23885_v4l2_dev_notify_init(dev);
1970
1971 /* pci init */
1972 dev->pci = pci_dev;
1973 if (pci_enable_device(pci_dev)) {
1974 err = -EIO;
1975 goto fail_ctrl;
1976 }
1977
1978 if (cx23885_dev_setup(dev) < 0) {
1979 err = -EINVAL;
1980 goto fail_ctrl;
1981 }
1982
1983 /* print pci info */
1984 dev->pci_rev = pci_dev->revision;
1985 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1986 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1987 "latency: %d, mmio: 0x%llx\n", dev->name,
1988 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1989 dev->pci_lat,
1990 (unsigned long long)pci_resource_start(pci_dev, 0));
1991
1992 pci_set_master(pci_dev);
1993 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1994 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1995 err = -EIO;
1996 goto fail_context;
1997 }
1998
1999 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
2000 if (IS_ERR(dev->alloc_ctx)) {
2001 err = PTR_ERR(dev->alloc_ctx);
2002 goto fail_context;
2003 }
2004 err = request_irq(pci_dev->irq, cx23885_irq,
2005 IRQF_SHARED, dev->name, dev);
2006 if (err < 0) {
2007 printk(KERN_ERR "%s: can't get IRQ %d\n",
2008 dev->name, pci_dev->irq);
2009 goto fail_irq;
2010 }
2011
2012 switch (dev->board) {
2013 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2014 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2015 break;
2016 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2017 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2018 break;
2019 }
2020
2021 /*
2022 * The CX2388[58] IR controller can start firing interrupts when
2023 * enabled, so these have to take place after the cx23885_irq() handler
2024 * is hooked up by the call to request_irq() above.
2025 */
2026 cx23885_ir_pci_int_enable(dev);
2027 cx23885_input_init(dev);
2028
2029 return 0;
2030
2031 fail_irq:
2032 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2033 fail_context:
2034 cx23885_dev_unregister(dev);
2035 fail_ctrl:
2036 v4l2_ctrl_handler_free(hdl);
2037 v4l2_device_unregister(&dev->v4l2_dev);
2038 fail_free:
2039 kfree(dev);
2040 return err;
2041 }
2042
/*
 * PCI remove callback: tear down one cx23885 device, releasing resources
 * in roughly the reverse order that cx23885_initdev() acquired them.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	/* Quiesce the IR/input subsystems first; per the note in
	 * cx23885_initdev(), the IR controller can fire interrupts while
	 * enabled, so it must be stopped before the IRQ is freed below. */
	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2064
/* PCI IDs this driver binds to; matched on device ID only, any subsystem */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor    = 0x14f1,
		.device    = 0x8852,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor    = 0x14f1,
		.device    = 0x8880,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2083
/* PCI driver glue: probe/remove callbacks; power management not implemented */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2093
cx23885_init(void)2094 static int __init cx23885_init(void)
2095 {
2096 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2097 CX23885_VERSION);
2098 return pci_register_driver(&cx23885_pci_driver);
2099 }
2100
/* Module exit point: undo the pci_register_driver() from cx23885_init(). */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2105
/* Hook module load/unload to the init/exit routines above */
module_init(cx23885_init);
module_exit(cx23885_fini);