/* -*- c-basic-offset: 8 -*-
 *
 * fw-ohci.c - Driver for OHCI 1394 boards
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

#define descriptor_output_more		0
#define descriptor_output_last		(1 << 12)
#define descriptor_input_more		(2 << 12)
#define descriptor_input_last		(3 << 12)
#define descriptor_status		(1 << 11)
#define descriptor_key_immediate	(2 << 8)
#define descriptor_ping			(1 << 7)
#define descriptor_yy			(1 << 6)
#define descriptor_no_irq		(0 << 4)
#define descriptor_irq_error		(1 << 4)
#define descriptor_irq_always		(3 << 4)
#define descriptor_branch_always	(3 << 2)
#define descriptor_wait			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

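/*
 * Note on descriptor chaining, per the OHCI 1394 specification: the low
 * four bits of branch_address hold the Z value, i.e. the number of
 * 16-byte descriptors in the *next* block, and the upper 28 bits hold
 * that block's 16-byte-aligned bus address.  For example, a
 * two-descriptor block at bus address 0x1000 is linked as
 * 0x1000 | 2 == 0x1002; a Z of zero terminates the DMA program.
 */
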
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

#define control_set(regs)	(regs)
#define control_clear(regs)	((regs) + 4)
#define command_ptr(regs)	((regs) + 12)
#define context_match(regs)	((regs) + 16)

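/*
 * Each DMA context is programmed through a small register file: a
 * ContextControl set/clear register pair, a CommandPtr register and,
 * for isochronous receive, a ContextMatch register.  Writing a 1 bit
 * to the set register sets that control bit, writing it to the clear
 * register (at offset + 4) clears it; reading either returns the
 * current value.  E.g. starting a context is
 * reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN) and stopping
 * it is reg_write(ohci, control_clear(ctx->regs), CONTEXT_RUN).
 */
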
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
struct context {
	struct fw_ohci *ohci;
	u32 regs;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define it_header_sy(v)          ((v) << 0)
#define it_header_tcode(v)       ((v) << 4)
#define it_header_channel(v)     ((v) << 8)
#define it_header_tag(v)         ((v) << 14)
#define it_header_speed(v)       ((v) << 16)
#define it_header_data_length(v) ((v) << 16)

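/*
 * These macros build the first quadlet of an isochronous transmit
 * header.  As an illustration, a stream packet on channel 5 with
 * tag 1, sy 0, at S400 (speed code 2) would be assembled as
 * it_header_sy(0) | it_header_tcode(TCODE_STREAM_DATA) |
 * it_header_channel(5) | it_header_tag(1) | it_header_speed(2);
 * the second quadlet carries it_header_data_length().
 */
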
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;

	/* Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held. */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
#define ISO_BUFFER_SIZE			(64 * 1024)
#define AT_BUFFER_SIZE			4096

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof ab->descriptor);
	ab->descriptor.control = cpu_to_le16(descriptor_input_more |
					     descriptor_status |
					     descriptor_branch_always);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

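/*
 * Note: linking each new page with "ab_bus | 1" encodes Z = 1, telling
 * the controller that the next block consists of a single INPUT_MORE
 * descriptor (the one embedded at the head of the new ar_buffer page).
 * The CONTEXT_WAKE write then prompts the controller to re-fetch the
 * branch address in case it had already stopped at the old tail.
 */
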
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack = ((status >> 16) & 0x1f) - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/* The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request. */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/* This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet. */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 ab->descriptor.data_address - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

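/*
 * Worked example of the split-packet path above: suppose a 32-byte
 * packet starts 12 bytes before the end of the current page.  When
 * the page's descriptor completes (res_count == 0), the 12-byte head
 * still sits at ctx->pointer, and the remaining 20 bytes (plus any
 * further packets) are at the start of the next buffer's data area.
 * The code memmove()s the head to the front of the now-idle page,
 * memcpy()s the continuation behind it, parses packets out of that
 * contiguous copy, then frees the page and hands a fresh one to the
 * controller.
 */
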
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	return 0;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

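/*
 * The tasklet walks the DMA program the same way the controller does:
 * the Z value recovered from the low bits of each branch_address gives
 * the size of the next block, and the block's status-bearing
 * descriptor is its last one.  The z == 2 special case is a block
 * whose second descriptor slot holds immediate data rather than a real
 * descriptor (see descriptor_key_immediate), so the status lives in
 * the first descriptor instead of in d + 1.
 */
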
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor = ctx->buffer;
	ctx->prev_descriptor = ctx->buffer;
	ctx->tail_descriptor = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/* We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full. */

	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}

static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof *d);
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	return d;
}

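/*
 * The three cases above cover a circular descriptor buffer: either the
 * head is behind the tail and z descriptors fit in the gap, or the
 * head is ahead of the tail and z descriptors fit before the end of
 * the buffer, or they don't, but z descriptors fit at the very start,
 * before the tail, so the allocation wraps around.  For example, with
 * a 16-descriptor buffer, tail at index 4 and head at index 14, a
 * request for z = 3 wraps and returns index 0.
 */
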
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, command_ptr(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, control_clear(ctx->regs), ~0);
	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

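/*
 * Appending is a single pointer store from the controller's point of
 * view: the new block is fully written out first, and only then does
 * the previous block's branch_address get patched from 0 to
 * "d_bus | z", which atomically publishes the block.  CONTEXT_WAKE
 * makes the controller re-read the branch in case it already hit the
 * old NULL branch and went idle.
 */
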
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, control_set(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		msleep(1);
	}
}

struct driver_data {
	struct fw_packet *packet;
};

/* This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation. */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(descriptor_key_immediate);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/* The DMA format for asynchronous link packets is different
	 * from the IEEE 1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet. */

	header = (__le32 *) &d[1];
	if (packet->header_length > 8) {
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
	} else {
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(descriptor_output_last |
				     descriptor_irq_always |
				     descriptor_branch_always);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, control_set(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

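/*
 * Layout of the four-descriptor block allocated above: d[0] is a
 * *_IMMEDIATE descriptor and its up-to-16-byte immediate data (the
 * rewritten packet header) is stored in the d[1] slot.  If the packet
 * has a payload, d[2] becomes an OUTPUT_LAST descriptor pointing at
 * the DMA-mapped payload (z = 3); otherwise d[0] itself is the last
 * descriptor (z = 2).  The d[3] slot is never fetched by the
 * controller and is reused as scratch space for the driver_data
 * back-pointer consumed by handle_at_packet() and
 * ohci_cancel_packet().
 */
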
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t payload_bus;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	payload_bus = le32_to_cpu(last->data_address);
	if (payload_bus != 0)
		dma_unmap_single(ohci->card.device, payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/* The packet was flushed; this should give the same error
		 * as when we try to use a stale generation count. */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/* Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks. */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define header_get_destination(q)	(((q) >> 16) & 0xffff)
#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)

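/*
 * These macros pick fields out of IEEE 1394 asynchronous packet header
 * quadlets.  As an illustration, header[0] == 0xffc10140 decodes to
 * header_get_destination() == 0xffc1 (bus 0x3ff, node 1) and
 * header_get_tcode() == 0x4 (read quadlet request), while the 48-bit
 * destination offset is header_get_offset_high(header[1]) combined
 * with all of header[2], as done in handle_local_request() below.
 */
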
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = header_get_tcode(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = header_get_data_length(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = header_get_tcode(packet->header[0]);
	length = header_get_data_length(packet->header[3]);
	payload = packet->payload;
	ext_tcode = header_get_extended_tcode(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof lock_old);
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 header_get_offset_high(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/* The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs. */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/* Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs. */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/* This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place. */

	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/* Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last. */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	/* When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	ohci->next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/* We are ready to go, initiate bus reset to finish the
	 * initialization. */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/* When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/* Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect. */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/* FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses. */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
}

static u64
ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}

static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count > 0 && db->second_res_count > 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	header_length = le16_to_cpu(db->first_req_count) -
		le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/* The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet. */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		p += ctx->base.header_size + 4;
	}

	ctx->header_length = i;

	if (le16_to_cpu(db->control) & descriptor_irq_always) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & descriptor_irq_always)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		callback = handle_ir_dualbuffer_packet;
	}

	/* FIXME: We need a fallback for pre 1.1 OHCI. */
	if (callback == handle_ir_dualbuffer_packet &&
	    ohci->version < OHCI_VERSION_1_1)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof *ctx);
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
			      regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}

static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, context_match(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate... */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(descriptor_key_immediate);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(it_header_sy(p->sy) |
					it_header_tag(p->tag) |
					it_header_tcode(TCODE_STREAM_DATA) |
					it_header_channel(ctx->base.channel) |
					it_header_speed(ctx->base.speed));
		header[1] =
			cpu_to_le32(it_header_data_length(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = descriptor_irq_always;
	else
		irq = descriptor_no_irq;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(descriptor_output_last |
				     descriptor_status |
				     descriptor_branch_always |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
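
/*
 * Example of the descriptor count (z) computed above: a non-skip
 * packet with an 8-byte user-supplied header and a payload that
 * crosses one page boundary needs z = 2 (immediate IT header pair)
 * + 1 (header block) + 2 (one OUTPUT descriptor per touched page)
 * = 5 descriptors, plus header_z slots appended after the block to
 * hold the copied header bytes.
 */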
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate... */

	if (packet->skip) {
		d = context_get_descriptors(&ctx->context, 2, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(descriptor_status |
					  descriptor_branch_always |
					  descriptor_wait);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		context_append(&ctx->context, d, 2, 0);
	}

	p = packet;
	z = 2;

	/* The OHCI controller puts the status word in the header
	 * buffer too, so we need 4 extra bytes per packet. */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * (ctx->base.header_size + 4);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof *d);
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;

	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(descriptor_status |
					  descriptor_branch_always);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		db->first_req_count = cpu_to_le16(header_size);
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof *db);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(descriptor_irq_always);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		page++;
	}

	return 0;
}
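
/*
 * In dual-buffer mode each db_descriptor splits an incoming packet in
 * two: the first buffer collects the isochronous headers (first_size
 * bytes per packet, i.e. the user-visible header plus the 4-byte
 * status word) and the second buffer receives the payload, here fed
 * one page-sized chunk at a time.  The header area is carved out of
 * the descriptor ring itself, directly behind the db_descriptor
 * (d_bus + sizeof *db), which is why header_z extra descriptor slots
 * are appended to the block.
 */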
static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);

	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		return ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
		return ohci_queue_iso_receive_dualbuffer(base, packet,
							 buffer, payload);
	else
		/* FIXME: Implement fallback for OHCI 1.0 controllers. */
		return -EINVAL;
}

static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

/* ---------- pci subsystem interface ---------- */

enum {
	CLEANUP_SELF_ID,
	CLEANUP_REGISTERS,
	CLEANUP_IOMEM,
	CLEANUP_DISABLE,
	CLEANUP_PUT_CARD,
};

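/*
 * The cleanup stages are ordered from the last initialization step to
 * the first, and the switch below intentionally falls through: passing
 * e.g. CLEANUP_REGISTERS unwinds the context lists and register
 * mapping, then the MMIO region, the PCI device and finally the card
 * reference.  Callers pass the deepest stage that was successfully
 * reached, together with the error code to propagate.
 */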
static int cleanup(struct fw_ohci *ohci, int stage, int code)
{
	struct pci_dev *dev = to_pci_dev(ohci->card.device);

	switch (stage) {
	case CLEANUP_SELF_ID:
		dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
				  ohci->self_id_cpu, ohci->self_id_bus);
	case CLEANUP_REGISTERS:
		kfree(ohci->it_context_list);
		kfree(ohci->ir_context_list);
		pci_iounmap(dev, ohci->registers);
	case CLEANUP_IOMEM:
		pci_release_region(dev, 0);
	case CLEANUP_DISABLE:
		pci_disable_device(dev);
	case CLEANUP_PUT_CARD:
		fw_card_put(&ohci->card);
	}

	return code;
}

static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed;
	u64 guid;
	int error_code;
	size_t size;

	ohci = kzalloc(sizeof *ohci, GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	if (pci_enable_device(dev)) {
		fw_error("Failed to enable OHCI hardware.\n");
		return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV);
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	if (pci_request_region(dev, 0, ohci_driver_name)) {
		fw_error("MMIO resource unavailable\n");
		return cleanup(ohci, CLEANUP_DISABLE, -EBUSY);
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		return cleanup(ohci, CLEANUP_IOMEM, -ENXIO);
	}

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
	}

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_masterIntEnable |
		  OHCI1394_cycle64Seconds);

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (error_code < 0)
		return cleanup(ohci, CLEANUP_SELF_ID, error_code);

	ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

	return 0;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/* FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more. */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	cleanup(ohci, CLEANUP_SELF_ID, 0);

	fw_notify("Removed fw-ohci device.\n");
}

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);