/* drivers/media/video/ivtv/ivtv-irq.c */
/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20
21#include "ivtv-driver.h"
22#include "ivtv-firmware.h"
23#include "ivtv-fileops.h"
24#include "ivtv-queue.h"
25#include "ivtv-udma.h"
26#include "ivtv-irq.h"
27#include "ivtv-ioctl.h"
28#include "ivtv-mailbox.h"
29#include "ivtv-vbi.h"
1e13f9e3 30#include "ivtv-yuv.h"
1a0adaf3
HV
31
32#define DMA_MAGIC_COOKIE 0x000001fe
33
1a0adaf3
HV
34static void ivtv_dma_dec_start(struct ivtv_stream *s);
35
36static const int ivtv_stream_map[] = {
37 IVTV_ENC_STREAM_TYPE_MPG,
38 IVTV_ENC_STREAM_TYPE_YUV,
39 IVTV_ENC_STREAM_TYPE_PCM,
40 IVTV_ENC_STREAM_TYPE_VBI,
41};
42
dc02d50a
HV
43
44static void ivtv_pio_work_handler(struct ivtv *itv)
1a0adaf3 45{
dc02d50a
HV
46 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
47 struct ivtv_buffer *buf;
48 struct list_head *p;
49 int i = 0;
50
bd58df6d 51 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
dc02d50a
HV
52 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
53 s->v4l2dev == NULL || !ivtv_use_pio(s)) {
54 itv->cur_pio_stream = -1;
55 /* trigger PIO complete user interrupt */
56 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
57 return;
58 }
bd58df6d 59 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
dc02d50a
HV
60 buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
61 list_for_each(p, &s->q_dma.list) {
62 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
63 u32 size = s->PIOarray[i].size & 0x3ffff;
1a0adaf3 64
dc02d50a
HV
65 /* Copy the data from the card to the buffer */
66 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
67 memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
68 }
69 else {
70 memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
71 }
72 if (s->PIOarray[i].size & 0x80000000)
73 break;
74 i++;
75 }
76 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
1a0adaf3
HV
77}
78
1e13f9e3
HV
79void ivtv_irq_work_handler(struct work_struct *work)
80{
81 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
82
83 DEFINE_WAIT(wait);
84
dc02d50a
HV
85 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
86 ivtv_pio_work_handler(itv);
87
1e13f9e3 88 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
dc02d50a 89 ivtv_vbi_work_handler(itv);
1e13f9e3
HV
90
91 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
92 ivtv_yuv_work_handler(itv);
93}
94
1a0adaf3
HV
95/* Determine the required DMA size, setup enough buffers in the predma queue and
96 actually copy the data from the card to the buffers in case a PIO transfer is
97 required for this stream.
98 */
99static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
100{
101 struct ivtv *itv = s->itv;
102 struct ivtv_buffer *buf;
103 struct list_head *p;
104 u32 bytes_needed = 0;
105 u32 offset, size;
106 u32 UVoffset = 0, UVsize = 0;
107 int skip_bufs = s->q_predma.buffers;
108 int idx = s->SG_length;
109 int rc;
110
111 /* sanity checks */
112 if (s->v4l2dev == NULL) {
113 IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
114 return -1;
115 }
116 if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
117 IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
118 return -1;
119 }
120
121 /* determine offset, size and PTS for the various streams */
122 switch (s->type) {
123 case IVTV_ENC_STREAM_TYPE_MPG:
124 offset = data[1];
125 size = data[2];
126 s->dma_pts = 0;
127 break;
128
129 case IVTV_ENC_STREAM_TYPE_YUV:
130 offset = data[1];
131 size = data[2];
132 UVoffset = data[3];
133 UVsize = data[4];
134 s->dma_pts = ((u64) data[5] << 32) | data[6];
135 break;
136
137 case IVTV_ENC_STREAM_TYPE_PCM:
138 offset = data[1] + 12;
139 size = data[2] - 12;
140 s->dma_pts = read_dec(offset - 8) |
141 ((u64)(read_dec(offset - 12)) << 32);
142 if (itv->has_cx23415)
143 offset += IVTV_DECODER_OFFSET;
144 break;
145
146 case IVTV_ENC_STREAM_TYPE_VBI:
147 size = itv->vbi.enc_size * itv->vbi.fpi;
148 offset = read_enc(itv->vbi.enc_start - 4) + 12;
149 if (offset == 12) {
150 IVTV_DEBUG_INFO("VBI offset == 0\n");
151 return -1;
152 }
153 s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
154 break;
155
156 case IVTV_DEC_STREAM_TYPE_VBI:
157 size = read_dec(itv->vbi.dec_start + 4) + 8;
158 offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
159 s->dma_pts = 0;
160 offset += IVTV_DECODER_OFFSET;
161 break;
162 default:
163 /* shouldn't happen */
164 return -1;
165 }
166
167 /* if this is the start of the DMA then fill in the magic cookie */
168 if (s->SG_length == 0) {
169 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
170 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
171 s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
172 write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
173 }
174 else {
175 s->dma_backup = read_enc(offset);
176 write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
177 }
178 s->dma_offset = offset;
179 }
180
181 bytes_needed = size;
182 if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
183 /* The size for the Y samples needs to be rounded upwards to a
184 multiple of the buf_size. The UV samples then start in the
185 next buffer. */
186 bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
187 bytes_needed += UVsize;
188 }
189
bd58df6d 190 IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
1a0adaf3
HV
191 ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
192
193 rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
194 if (rc < 0) { /* Insufficient buffers */
195 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
196 bytes_needed, s->name);
197 return -1;
198 }
199 if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
200 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
201 IVTV_WARN("Cause: the application is not reading fast enough.\n");
202 }
203 s->buffers_stolen = rc;
204
dc02d50a 205 /* got the buffers, now fill in SGarray (DMA) */
1a0adaf3
HV
206 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
207 memset(buf->buf, 0, 128);
208 list_for_each(p, &s->q_predma.list) {
209 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
210
211 if (skip_bufs-- > 0)
212 continue;
dc02d50a
HV
213 s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
214 s->SGarray[idx].src = cpu_to_le32(offset);
215 s->SGarray[idx].size = cpu_to_le32(s->buf_size);
1a0adaf3
HV
216 buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
217
1a0adaf3
HV
218 s->q_predma.bytesused += buf->bytesused;
219 size -= buf->bytesused;
220 offset += s->buf_size;
221
222 /* Sync SG buffers */
223 ivtv_buf_sync_for_device(s, buf);
224
225 if (size == 0) { /* YUV */
226 /* process the UV section */
227 offset = UVoffset;
228 size = UVsize;
229 }
230 idx++;
231 }
232 s->SG_length = idx;
233 return 0;
234}
235
236static void dma_post(struct ivtv_stream *s)
237{
238 struct ivtv *itv = s->itv;
239 struct ivtv_buffer *buf = NULL;
240 struct list_head *p;
241 u32 offset;
242 u32 *u32buf;
243 int x = 0;
244
bd58df6d 245 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
1a0adaf3
HV
246 s->name, s->dma_offset);
247 list_for_each(p, &s->q_dma.list) {
248 buf = list_entry(p, struct ivtv_buffer, list);
249 u32buf = (u32 *)buf->buf;
250
251 /* Sync Buffer */
252 ivtv_buf_sync_for_cpu(s, buf);
253
254 if (x == 0) {
255 offset = s->dma_last_offset;
256 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
257 {
258 for (offset = 0; offset < 64; offset++) {
259 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
260 break;
261 }
262 }
263 offset *= 4;
264 if (offset == 256) {
265 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
266 offset = s->dma_last_offset;
267 }
268 if (s->dma_last_offset != offset)
269 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
270 s->dma_last_offset = offset;
271 }
272 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
273 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
274 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
275 }
276 else {
277 write_enc_sync(0, s->dma_offset);
278 }
279 if (offset) {
280 buf->bytesused -= offset;
281 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
282 }
283 *u32buf = cpu_to_le32(s->dma_backup);
284 }
285 x++;
286 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
287 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
288 s->type == IVTV_ENC_STREAM_TYPE_VBI)
289 set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
290 }
291 if (buf)
292 buf->bytesused += s->dma_last_offset;
293 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
dc02d50a
HV
294 list_for_each(p, &s->q_dma.list) {
295 buf = list_entry(p, struct ivtv_buffer, list);
296
297 /* Parse and Groom VBI Data */
298 s->q_dma.bytesused -= buf->bytesused;
299 ivtv_process_vbi_data(itv, buf, 0, s->type);
300 s->q_dma.bytesused += buf->bytesused;
301 }
1a0adaf3
HV
302 if (s->id == -1) {
303 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
304 return;
305 }
306 }
307 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
308 if (s->id != -1)
309 wake_up(&s->waitq);
310}
311
312void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
313{
314 struct ivtv *itv = s->itv;
315 struct ivtv_buffer *buf;
316 struct list_head *p;
317 u32 y_size = itv->params.height * itv->params.width;
318 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
319 int y_done = 0;
320 int bytes_written = 0;
321 unsigned long flags = 0;
322 int idx = 0;
323
bd58df6d 324 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
1a0adaf3
HV
325 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
326 list_for_each(p, &s->q_predma.list) {
327 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
328
329 /* YUV UV Offset from Y Buffer */
330 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
331 offset = uv_offset;
332 y_done = 1;
333 }
334 s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
335 s->SGarray[idx].dst = cpu_to_le32(offset);
336 s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
337
338 offset += buf->bytesused;
339 bytes_written += buf->bytesused;
340
341 /* Sync SG buffers */
342 ivtv_buf_sync_for_device(s, buf);
343 idx++;
344 }
345 s->SG_length = idx;
346
347 /* Mark last buffer size for Interrupt flag */
348 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
349
350 /* Sync Hardware SG List of buffers */
351 ivtv_stream_sync_for_device(s);
352 if (lock)
353 spin_lock_irqsave(&itv->dma_reg_lock, flags);
354 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
355 ivtv_dma_dec_start(s);
356 }
357 else {
358 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
359 }
360 if (lock)
361 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
362}
363
364/* start the encoder DMA */
365static void ivtv_dma_enc_start(struct ivtv_stream *s)
366{
367 struct ivtv *itv = s->itv;
368 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
369 int i;
370
bd58df6d 371 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
dc02d50a 372
1a0adaf3
HV
373 if (s->q_predma.bytesused)
374 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
dc02d50a
HV
375
376 if (ivtv_use_dma(s))
377 s->SGarray[s->SG_length - 1].size =
378 cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
1a0adaf3
HV
379
380 /* If this is an MPEG stream, and VBI data is also pending, then append the
381 VBI DMA to the MPEG DMA and transfer both sets of data at once.
382
383 VBI DMA is a second class citizen compared to MPEG and mixing them together
384 will confuse the firmware (the end of a VBI DMA is seen as the end of a
385 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
386 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
387 use. This way no conflicts occur. */
388 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
389 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
390 s->SG_length + s_vbi->SG_length <= s->buffers) {
391 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
dc02d50a
HV
392 if (ivtv_use_dma(s_vbi))
393 s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
1a0adaf3
HV
394 for (i = 0; i < s_vbi->SG_length; i++) {
395 s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
396 }
397 itv->vbi.dma_offset = s_vbi->dma_offset;
398 s_vbi->SG_length = 0;
399 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
bd58df6d 400 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
1a0adaf3
HV
401 }
402
403 /* Mark last buffer size for Interrupt flag */
404 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
405
dd1e729d
HV
406 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
407 set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
408 else
409 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
410
dc02d50a
HV
411 if (ivtv_use_pio(s)) {
412 for (i = 0; i < s->SG_length; i++) {
413 s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
414 s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
415 }
416 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
417 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
418 set_bit(IVTV_F_I_PIO, &itv->i_flags);
419 itv->cur_pio_stream = s->type;
420 }
421 else {
422 /* Sync Hardware SG List of buffers */
423 ivtv_stream_sync_for_device(s);
424 write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
425 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
426 set_bit(IVTV_F_I_DMA, &itv->i_flags);
427 itv->cur_dma_stream = s->type;
201700d3 428 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
dc02d50a
HV
429 add_timer(&itv->dma_timer);
430 }
1a0adaf3
HV
431}
432
/* Kick off a decoder DMA transfer for stream s: move any prepared data to
   q_dma, hand the SG list to the hardware, flag the transfer in progress
   and arm the watchdog timer (ivtv_unfinished_dma fires if the completion
   interrupt never arrives). Register write order matters here. */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	/* start the transfer: bit 0 of DMAXFER kicks the decoder DMA */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
448
/* Handle the "DMA to the card finished" interrupt for decoder playback
   (MPG or YUV) and user-space DMA: stop the watchdog, ack any DMA error,
   tell the firmware the transferred size so it can schedule the next
   request, and recycle the buffers to q_free. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	del_timer(&itv->dma_timer);
	/* mask 0x14 in DMASTATUS flags a decoder DMA error; ack by writing
	   back the low status bits */
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	}
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		/* hw_stream_type is the firmware's numbering: 0 = MPG, 2 = YUV */
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
			hw_stream_type = 2;
		}
		else {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
			hw_stream_type = 0;
		}
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		ivtv_stream_sync_for_cpu(s);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
495
/* Handle the encoder DMA completion interrupt: work out which stream
   finished (mailbox data[1], or the VBI stream if IVTV_F_I_ENC_VBI was
   set when the DMA started), ask the firmware to retry on error,
   post-process the transferred buffers, and also post-process VBI data
   that was appended to an MPEG DMA. */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
		data[1] = 3;	/* VBI's index in ivtv_stream_map */
	else if (data[1] > 2)
		return;		/* unknown stream index: ignore */
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		/* ask the firmware to schedule the transfer again */
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	}
	s->SG_length = 0;
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	ivtv_stream_sync_for_cpu(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		u32 tmp;

		/* VBI rode along with this DMA: post-process it with the
		   VBI stream's own dma_offset temporarily swapped in */
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		tmp = s->dma_offset;
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
		s->dma_offset = tmp;
	}
	wake_up(&itv->dma_waitq);
}
530
dc02d50a
HV
531static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
532{
533 struct ivtv_stream *s;
534
535 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
536 itv->cur_pio_stream = -1;
537 return;
538 }
539 s = &itv->streams[itv->cur_pio_stream];
bd58df6d 540 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
dc02d50a
HV
541 s->SG_length = 0;
542 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
543 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
544 itv->cur_pio_stream = -1;
545 dma_post(s);
546 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
547 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
548 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
549 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
550 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
551 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
552 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
553 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
554 u32 tmp;
555
556 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
557 tmp = s->dma_offset;
558 s->dma_offset = itv->vbi.dma_offset;
559 dma_post(s);
560 s->dma_offset = tmp;
561 }
562 wake_up(&itv->dma_waitq);
563}
564
1a0adaf3
HV
/* Handle the DMA error interrupt: ack the error and, for a normal
   (non user-space) DMA whose current stream is known, restart that
   stream's transfer; otherwise give up and release the DMA state. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		/* decoder stream types sort at/above IVTV_DEC_STREAM_TYPE_MPG */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
590
/* Handle the "encoder has captured data ready" interrupt: fetch the DMA
   request arguments from the mailbox, validate them, and mark the target
   stream's transfer (DMA or PIO) pending. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	/* data[0] indexes ivtv_stream_map (0..2 here); offset/size of 0 is bogus */
	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
610
/* Handle the "encoder VBI data ready" interrupt for the encoder VBI
   stream, dropping stale requests if too many have piled up. */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	/* NOTE(review): data[] is passed to stream_enc_dma_append()
	   uninitialized; the VBI cases there read their geometry from card
	   memory and do not appear to use data[] — confirm before relying
	   on its contents. */
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->SG_length > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->SG_length = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
644
/* Handle the "decoder VBI re-insert" interrupt: if the decoder VBI
   stream is claimed by an application, queue a PIO transfer of the
   re-inserted VBI data. */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	/* NOTE(review): data[] is passed uninitialized; the
	   IVTV_DEC_STREAM_TYPE_VBI case in stream_enc_dma_append() reads
	   its geometry from card registers and does not appear to use it —
	   confirm before relying on its contents. */
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
656
/* Handle the decoder "needs more data" interrupt: work out how much the
   firmware wants and at what card offset, then start the transfer if
   enough data is buffered, otherwise flag the stream as starved. */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		/* one full YUV 4:2:0 frame (Y plane + half-size UV) */
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		/* MPEG requests are capped at 64 kB per transfer */
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
686
687static void ivtv_irq_vsync(struct ivtv *itv)
688{
689 /* The vsync interrupt is unusual in that it won't clear until
690 * the end of the first line for the current field, at which
691 * point it clears itself. This can result in repeated vsync
692 * interrupts, or a missed vsync. Read some of the registers
693 * to determine the line being displayed and ensure we handle
694 * one vsync per frame.
695 */
696 unsigned int frame = read_reg(0x28c0) & 1;
697 int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
698
699 if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
700
bfd7beac
IA
701 if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
702 ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
1a0adaf3
HV
703 (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
704 int next_dma_frame = last_dma_frame;
705
bfd7beac
IA
706 if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
707 if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
708 write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
709 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
710 write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
711 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
712 next_dma_frame = (next_dma_frame + 1) & 0x3;
713 atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
714 itv->yuv_info.fields_lapsed = -1;
715 }
1a0adaf3
HV
716 }
717 }
718 if (frame != (itv->lastVsyncFrame & 1)) {
719 struct ivtv_stream *s = ivtv_get_output_stream(itv);
720
721 itv->lastVsyncFrame += 1;
722 if (frame == 0) {
723 clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
724 clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
725 }
726 else {
727 set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
728 }
729 if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
730 set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
731 wake_up(&itv->event_waitq);
732 }
733 wake_up(&itv->vsync_waitq);
734 if (s)
735 wake_up(&s->waitq);
736
737 /* Send VBI to saa7127 */
1e13f9e3
HV
738 if (frame) {
739 set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
dc02d50a 740 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
1e13f9e3 741 }
1a0adaf3
HV
742
743 /* Check if we need to update the yuv registers */
744 if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
745 if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
746 last_dma_frame = (last_dma_frame - 1) & 3;
747
748 if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
749 itv->yuv_info.update_frame = last_dma_frame;
750 itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
751 itv->yuv_info.yuv_forced_update = 0;
1e13f9e3 752 set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
dc02d50a 753 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
1a0adaf3
HV
754 }
755 }
bfd7beac
IA
756
757 itv->yuv_info.fields_lapsed ++;
1a0adaf3
HV
758 }
759}
760
/* IRQ sources after which a pending stream DMA/PIO may be started */
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)

/* Main interrupt handler: dispatches every unmasked IRQ source, catches
   missed vsyncs, round-robins pending stream DMA/PIO starts, and queues
   the deferred work handler.  All of it runs under dma_reg_lock. */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	/* keep only the sources we have not masked off */
	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* start a pending stream DMA; irq_rr_idx round-robins for fairness */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		/* no stream DMA was pending: a user-space DMA may proceed */
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	/* start a pending PIO transfer, same round-robin scheme */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
903
/* DMA watchdog timer callback (armed in ivtv_dma_enc_start() /
   ivtv_dma_dec_start()): if the transfer is still flagged in progress
   when it fires, the completion interrupt was lost — ack the error and
   release the DMA state so the driver can recover. */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}