drivers/media/video/ivtv/ivtv-irq.c
/* interrupt handling
   Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
   Copyright (C) 2004 Chris Kennedy <c@groovy.org>
   Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe
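
/* DMA_MAGIC_COOKIE is a sentinel word: stream_enc_dma_append() below writes
   it into card memory at the start of a pending transfer (saving the word it
   overwrites in pending_backup), and dma_post() scans the first 256 bytes of
   the completed transfer for it to confirm that the DMA actually ran and to
   locate the true start of the data, restoring the saved word afterwards. */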

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
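
/* The encoder firmware reports the stream that needs servicing in the first
   mailbox argument (data[0]); ivtv_irq_enc_start_cap() uses the table above
   to translate that index into the matching driver stream. Indices above 2
   are rejected there, so the VBI entry is effectively a placeholder: VBI
   captures arrive via the separate IVTV_IRQ_ENC_VBI_CAP interrupt. */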

static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
	    s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
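	/* Raising IVTV_IRQ_ENC_PIO_COMPLETE by writing it into register 0x44
	   routes PIO completion through the normal interrupt path
	   (ivtv_irq_enc_pio_complete()), just as a hardware DMA completion
	   would be. */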
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) { /* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

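/* Post-process a completed transfer: verify the DMA_MAGIC_COOKIE and strip
   any leading slack before the real data, restore the word the cookie
   overwrote, flag MPG/VBI buffers for the deferred ABCD -> DCBA byteswap,
   and hand the filled buffers to q_full for the reader (or back to q_free
   for internally handled VBI data). */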
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	u32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
			    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}

void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
		    (bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

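/* Both helpers above hand the hardware one ivtv_sg_element at a time: the
   top bit or'ed into the size word presumably marks the entry as the last
   (and only) one in the list, and setting bit 1 (encoder) or bit 0 (decoder)
   of IVTV_REG_DMAXFER kicks off the transfer. The next element is programmed
   from the DMA-complete interrupt until sg_processed catches up with
   sg_processing_size. */
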
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
		add_timer(&itv->dma_timer);
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware here, as in PIO mode.
		   This seems to tell the firmware that we are done and how large
		   the transfer was, so it can calculate what we need next.
		   We could probably do this part ourselves, but we would have to
		   fully calculate the xfer info ourselves and not use interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
	if (itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}
	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
				s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
			read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->sg_pending_size > 2) {
		struct ivtv_buffer *buf;
		list_for_each_entry(buf, &s->q_predma.list, list)
			ivtv_buf_sync_for_cpu(s, buf);
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->sg_pending_size = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
	    !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
	    !stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

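/* Note: ivtv_dma_stream_dec_prepare() is called with lock == 0 above because
   this runs from the interrupt handler, which already holds
   itv->dma_reg_lock; callers outside the IRQ path presumably pass lock == 1
   so that the spinlock is taken around the DMA start instead. */
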
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ yi->sync_field[last_dma_frame]) == 0 &&
	     ((itv->last_vsync_field & 1) ^ yi->sync_field[last_dma_frame])) ||
	    (frame != (itv->last_vsync_field & 1) && !yi->frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(yi->frame_interlaced && yi->field_delay[next_dma_frame] && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
		    test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
		    test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
		    test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((yi->yuv_forced_update || yi->new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!yi->new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (yi->new_frame_info[last_dma_frame].src_w) {
				yi->update_frame = last_dma_frame;
				yi->new_frame_info[last_dma_frame].update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | \
		      IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | \
		      IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | \
		      IVTV_IRQ_DEC_VBI_RE_INSERT)

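/* IVTV_IRQ_DMA groups every interrupt source that can leave DMA or PIO work
   pending; after the individual handlers have run, ivtv_irq_handler() uses
   this mask to decide whether to round-robin the streams and start the next
   pending transfer. */
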
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

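/* DMA watchdog, presumably registered as the itv->dma_timer callback (the
   timer is armed for 100 ms whenever a hardware DMA is started above): if
   the completion interrupt never arrived, log the stall, clear the DMA state
   and wake up any waiters. */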
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}