/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

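/*
 * Written at the start of a stream's data before a DMA transfer and checked
 * again when the transfer completes: if the cookie is no longer at the
 * expected offset, dma_post() scans for it to find where the data actually
 * begins (see stream_enc_dma_append() and dma_post() below).
 */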
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

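/*
 * Maps the stream type reported by the encoder firmware in data[0] of the
 * DMA mailbox (see ivtv_irq_enc_start_cap()) to the matching ivtv stream.
 */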
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */

	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
				(u8 *)(buf->buf + buf->readpos),
				(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}

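/*
 * Copy the pending scatter-gather segments for the current PIO stream from
 * card memory into the host buffers, then write the PIO-complete bit to
 * register 0x44 to trigger the interrupt that finishes the transfer.
 */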
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

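/*
 * Deferred work scheduled from the interrupt handler: runs in the per-card
 * kthread worker and dispatches whichever work-handler flags were set at
 * interrupt time.
 */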
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev.v4l2_dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

		case IVTV_ENC_STREAM_TYPE_PCM:
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			if (offset == 12) {
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(DMA_MAGIC_COOKIE, offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

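/*
 * Post-process a completed transfer: sync the buffers for the CPU, use the
 * magic cookie to detect (and compensate for) a shifted data start offset,
 * restore the word the cookie overwrote, flag MPG/VBI buffers for byte
 * swapping outside interrupt context, and move the buffers to q_full for
 * the reader.
 */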
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++)
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
						break;
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* source and destination overlap, so memmove() is required */
				memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);

	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user does this,
		 * fragments of data will just go out each interface as they
		 * race for PCM data.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}

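/*
 * Build the scatter-gather list for a transfer to the decoder from the
 * buffers queued in q_predma. For YUV output the list is split at the Y/UV
 * boundary so the UV samples land at their own offset, and a blanking
 * buffer may be prepended. The transfer starts immediately unless another
 * DMA is already in progress.
 */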
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock) {
		unsigned long flags = 0;

		spin_lock_irqsave(&itv->dma_reg_lock, flags);
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			ivtv_dma_dec_start(s);
		else
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
	} else {
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			ivtv_dma_dec_start(s);
		else
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

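/*
 * Program the next scatter-gather element into the hardware descriptor and
 * kick off the encoder DMA; bit 31 of the size word appears to mark the
 * element as ready for the firmware. The 300 ms timer is a watchdog against
 * transfers that never complete (see ivtv_unfinished_dma()).
 */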
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

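/* Decoder-side counterpart of ivtv_dma_enc_start_xfer(). */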
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure the VBI DMA is only ever carried by the MPEG DMA when both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

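/*
 * Handle completion of a host-to-decoder transfer: retry the whole transfer
 * on error, continue with the next scatter-gather element if any remain,
 * and otherwise tell the firmware how much data was transferred and recycle
 * the buffers.
 */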
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware here, as in PIO mode.
		   This appears to tell the firmware that we are done and how big
		   the transfer was, so it can calculate what we need next.
		   We could probably do this part ourselves, but we would have to
		   fully calculate the transfer info ourselves instead of relying
		   on interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

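/*
 * Handle completion of an encoder (card-to-host) DMA. On error the whole
 * transfer is retried up to three times; on success the buffers are
 * post-processed, including any VBI data that was appended to an MPEG
 * transfer.
 */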
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

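/*
 * PIO counterpart of ivtv_irq_enc_dma_complete(): post-process the copied
 * buffers and ask the firmware to schedule the next transfer for the
 * stream.
 */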
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

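/*
 * Handle a DMA error interrupt. Depending on which bus master operation
 * (encoder write, 0x02, or decoder read, 0x01) is still marked busy, either
 * let it finish under a fresh watchdog timer or retry it from the first
 * scatter-gather segment.
 */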
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion.  We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
	 * DMA engine has completed will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

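/*
 * Note that both this handler and ivtv_irq_dec_vbi_reinsert() below pass
 * data[] to stream_enc_dma_append() uninitialized. This is harmless:
 * stream_enc_dma_append() never reads data[] for VBI streams, which take
 * their offset and size from itv->vbi instead.
 */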
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

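/*
 * The decoder requests more data: read the requested offset and size from
 * the mailbox and, if enough data is queued, prepare and start the transfer
 * to the decoder; otherwise flag the stream as needing data.
 */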
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
				 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev.v4l2_dev)
			v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

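/* Interrupts after which a pending DMA or PIO transfer may need to be started. */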
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

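/*
 * Top-level interrupt handler: acknowledge and dispatch the pending
 * interrupts, start pending DMA/PIO transfers round-robin across the
 * streams, and schedule the kthread worker for anything that cannot be
 * done in interrupt context.
 */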
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks.
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

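/*
 * Timer callback for the DMA watchdog armed by the start_xfer functions:
 * the transfer took too long, so log the timeout, acknowledge the DMA
 * status, give up on the transfer and wake up any waiters.
 */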
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}