/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

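/* Sentinel written into card memory at the start of a stream transfer.
   stream_enc_dma_append() saves the word at the transfer offset and
   replaces it with this cookie; dma_post() then looks for the cookie in
   the first host buffer to find the offset at which the transfer really
   landed, and puts the saved word back. */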
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

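/* Maps the stream index reported by the encoder firmware in mailbox
   data[0] onto the driver's stream types. Only entries 0-2 are used by
   ivtv_irq_enc_start_cap(); VBI captures arrive via their own IRQ. */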
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

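/* Copy the data for a pending PIO transfer from card memory into the
   buffers queued on q_dma. Runs in process context from the IRQ work
   handler, presumably because the copies are too slow for interrupt
   context. */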
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
		struct sched_param param = { .sched_priority = 99 };

		/* This thread must use the FIFO scheduler as it
		   is realtime sensitive. */
		sched_setscheduler(current, SCHED_FIFO, &param);
	}
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

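	/* Move buffers from q_free (stealing from q_full if needed) to
	   q_predma; a negative result means not enough buffers, a positive
	   result is the number of buffers that had to be stolen. */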
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

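/* Finish a completed card-to-host transfer: locate the magic cookie to
   find where the data actually starts, restore the word the cookie
   replaced, flag MPG/VBI buffers for byteswapping, and hand the buffers
   to q_full (or back to q_free for internally processed VBI). */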
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
			    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* source and destination overlap, so memmove
				   is required */
				memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}

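/* Build the scatter-gather list for a host-to-decoder transfer: insert a
   blanking-line block for YUV if needed, split the buffer that straddles
   the Y/UV boundary, then start the DMA or mark it pending if the engine
   is busy. */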
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

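/* Program the single-element hardware SG list with the next segment and
   kick the transfer; bit 31 of the size word appears to mark the final
   element for the DMA engine. The 300 ms timer is a watchdog: if it
   fires, ivtv_unfinished_dma() aborts the transfer. */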
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

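	/* Pad the final SG element by 256 bytes, presumably so the full
	   payload is still transferred when the data arrives shifted by the
	   offset that dma_post() detects via the magic cookie. */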
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware, as in PIO mode.
		   I think this tells the firmware we are done and the size of
		   the xfer so it can calculate what we need next.
		   We could probably do this part ourselves, but we would have
		   to fully calculate the xfer info ourselves and not use
		   interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

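/* The encoder signalled the end of a DMA into host memory: on error,
   retry the whole SG list up to three times before dropping the frame;
   otherwise start the next SG element or finish up via dma_post(). */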
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

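/* The DMA engine raised an error interrupt: clear the status bits and
   restart the whole transfer from scratch (or restart a pending user
   DMA); only if no stream can be identified is the transfer abandoned. */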
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

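/* The encoder announces that captured data is ready in card memory.
   Mailbox layout, per the switch in stream_enc_dma_append(): data[0] is
   the stream type index, data[1] the offset, data[2] the size, with
   data[3]/data[4] the UV offset/size and data[5]/data[6] the PTS for
   YUV captures. */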
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

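	/* data[] is passed through unused here: for VBI streams,
	   stream_enc_dma_append() takes the offset and size from the card's
	   VBI registers rather than from the mailbox. */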
	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

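/* The decoder requests more stream data: record the requested offset and
   size, and if enough data is queued, move it to q_predma and start the
   host-to-decoder transfer; otherwise flag the stream as starved. */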
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
		(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
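				/* Registers 0x82c-0x838 appear to hold the
				   Y and UV display base addresses (two pairs,
				   presumably one per field); point them at
				   the next frame. */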
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE |\
		      IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP |\
		      IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ |\
		      IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

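	/* Start any pending stream DMA or PIO, rotating the starting point
	   via irq_rr_idx so that a single busy stream cannot monopolize the
	   engine. */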
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

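/* Timer callback armed by the *_start_xfer() helpers: if a transfer has
   not completed within 300 ms, reset the DMA status and give it up so the
   driver does not wait forever. */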
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}