/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "fw-ohci.h"
#include "fw-transaction.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

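/*
 * Illustrative example (values not used directly by the driver): an
 * input descriptor that writes back status and always branches, as
 * built by ar_context_add_page() below, carries the control word
 * DESCRIPTOR_INPUT_MORE | DESCRIPTOR_STATUS | DESCRIPTOR_BRANCH_ALWAYS,
 * i.e. (2 << 12) | (1 << 11) | (3 << 2) = 0x280c.
 */
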
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;
	bool old_uninorth;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

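/*
 * Note on the "| 1" in ar_context_add_page() above: the low four bits
 * of an OHCI branch address hold the Z value, i.e. the number of
 * 16-byte descriptors found at the branched-to address.  Each AR
 * buffer begins with exactly one descriptor, hence Z = 1.
 */
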
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

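/*
 * Background for cond_le32_to_cpu() above: old Apple UniNorth
 * controllers deliver received quadlets in big-endian byte order
 * already, so on those chips the value is passed through unswapped;
 * everywhere else the little-endian quadlet is converted with
 * le32_to_cpu() as usual.
 */
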
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);

	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (p.header[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

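/*
 * Worked example for the status decoding in handle_ar_packet() above
 * (illustrative values): a trailing status quadlet of 0x00521234
 * yields p.ack = (0x12 & 0x1f) - 16 = 2, p.speed = 2 and
 * p.timestamp = 0x1234.
 */
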
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs        = regs;
	ctx->ohci        = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

static struct descriptor *
find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

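/*
 * Example for find_branch_descriptor() above: in a z == 2 block whose
 * first descriptor carries an immediate key (key == 2, e.g. an AT
 * header) or already branches unconditionally (b == 3), the hardware
 * reads the branch address from that first descriptor; in all other
 * cases it sits in the last descriptor of the block.
 */
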
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int
context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}

static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
				  desc->buffer_bus -
				  ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

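/*
 * Typical calling sequence (sketch; see at_context_queue_packet()
 * below for the real thing), run with ohci->lock held:
 *
 *	d = context_get_descriptors(ctx, z, &d_bus);
 *	if (d == NULL)
 *		return -ENOMEM;
 *	...fill in d[0] through d[z - 1]...
 *	context_append(ctx, d, z, 0);
 */
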
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		mdelay(1);
	}
}

struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	if (packet->header_length > 8) {
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
	} else {
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_length > 0)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t payload_bus;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	payload_bus = le32_to_cpu(last->data_address);
	if (payload_bus != 0)
		dma_unmap_single(ohci->card.device, payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

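/*
 * Example (illustrative): for header[0] = 0xffc05100,
 * HEADER_GET_DESTINATION() extracts 0xffc0 (local bus, node 0) and
 * HEADER_GET_TCODE() extracts 0x0, a quadlet write request.
 */
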
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}

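/*
 * In handle_local_lock() above, sel maps the serial bus CSRs onto the
 * controller's csrSel codes: (csr - CSR_BUS_MANAGER_ID) / 4 yields
 * 0 for BUS_MANAGER_ID, 1 for BANDWIDTH_AVAILABLE and 2 resp. 3 for
 * CHANNELS_AVAILABLE_HI/LO, matching the layout in which these CSRs
 * occupy consecutive quadlets.
 */
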
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}

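/*
 * A note on the "!event || !~event" check in irq_handler() above: a
 * read of all ones usually means the register access itself failed,
 * e.g. because a CardBus controller has been ejected, so such an
 * interrupt is treated as not ours, just like event == 0.
 */
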
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
		  OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);

	/* Activate link_on bit and contender bit in our self ID packets. */
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		retval = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}

static u64
ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}

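/*
 * ohci_get_bus_time() above widens the 32-bit cycle timer
 * (cycleSeconds:cycleCount:cycleOffset) to 64 bits: bus_seconds is
 * advanced from the cycle64Seconds interrupt in irq_handler() each
 * time bit 31 of the timer wraps from 1 back to 0, providing the
 * high half of the returned value.
 */
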
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
			return 0;
		}
		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
	}

	header_length = le16_to_cpu(db->first_req_count) -
			le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		ctx->excess_bytes +=
			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
		p += ctx->base.header_size + 4;
	}
	ctx->header_length = i;

	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
			     le16_to_cpu(db->second_res_count);

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;
	int i;

	for (pd = d; pd <= last; pd++) {
		if (pd->transfer_status)
			break;
	}
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	i = ctx->header_length;
	p = last + 1;

	if (ctx->base.header_size > 0 &&
	    i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		ctx->header_length += ctx->base.header_size;
	}

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		if (ohci->version >= OHCI_VERSION_1_1)
			callback = handle_ir_dualbuffer_packet;
		else
			callback = handle_ir_packet_per_buffer;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}

eb0306ea
KH
1676static int ohci_start_iso(struct fw_iso_context *base,
1677 s32 cycle, u32 sync, u32 tags)
ed568912 1678{
373b2edd 1679 struct iso_context *ctx = container_of(base, struct iso_context, base);
30200739 1680 struct fw_ohci *ohci = ctx->context.ohci;
8a2f7d93 1681 u32 control, match;
ed568912
KH
1682 int index;
1683
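
	/*
	 * A requested start cycle goes into the context match value:
	 * bits 16-30 for IT contexts, bits 12-26 for IR contexts, where
	 * it sits alongside the tag, sync and channel filters.
	 */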
	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_ISOCH_HEADER;
		if (ohci->version >= OHCI_VERSION_1_1)
			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}
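
/*
 * Queue one packet on an IT context as a block of 16-byte descriptor
 * slots: an immediate OUTPUT_MORE descriptor carrying the two
 * isochronous packet header quadlets (two slots, omitted for skip
 * packets), optionally one descriptor pointing at extra header data,
 * and one descriptor per page fragment of the payload, the last one
 * marked OUTPUT_LAST.  z counts the slots of the block, header_z the
 * slots used to stage the header copy behind it.
 */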
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}
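
	/*
	 * The payload buffer's pages were DMA-mapped one by one, so the
	 * payload may be discontiguous in bus address space; emit one
	 * descriptor per page fragment.
	 */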
	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page   = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
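
/*
 * Dual-buffer receive mode (OHCI 1.1): each dual-buffer descriptor
 * directs the headers plus trailing status quadlet of incoming packets
 * into a first buffer and their payloads into a second one, so payload
 * data arrives back to back in the client's pages.
 */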
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the status word in the header
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size  = packet_count * (ctx->base.header_size + 4);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	rest     = p->payload_length;
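
	/*
	 * A skip packet is realized as one minimal dual-buffer
	 * descriptor with the wait bit set: the context stalls on it
	 * until a packet with a matching sync field arrives, which is
	 * then consumed while storing only a few token bytes of it.
	 */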
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 4;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}
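
/*
 * Packet-per-buffer receive mode, for controllers predating OHCI 1.1:
 * every expected packet gets its own small descriptor program, one
 * INPUT_MORE descriptor for the status/header quadlets followed by
 * INPUT descriptors covering the payload pages, the last one marked
 * INPUT_LAST.
 */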
static int
ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					 struct fw_iso_packet *packet,
					 struct fw_iso_buffer *buffer,
					 unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL, *pd = NULL;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the status word in the
	 * header buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size  = ctx->base.header_size + 4;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			pd = d + j;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
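
/*
 * Descriptor allocation and appending on a context are not serialized
 * in any other way, so hold ohci->lock across the whole queueing
 * operation.
 */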
static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
							   buffer, payload);
	else
		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
								  buffer,
								  payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return retval;
}

static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed;
	u64 guid;
	int err;
	size_t size;

#ifdef CONFIG_PPC_PMAC
	/* Necessary on some machines if fw-ohci was loaded/unloaded before */
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware.\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
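
	/*
	 * Writing ~0 to an Iso*IntMaskSet register sets only those bits
	 * which correspond to contexts the chip actually implements;
	 * reading it back thus yields a bitmap of the available
	 * isochronous contexts.
	 */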
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		err = -ENOMEM;
		goto fail_registers;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		err = -ENOMEM;
		goto fail_registers;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err < 0)
		goto fail_self_id;

	ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_registers:
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);

#ifdef CONFIG_PPC_PMAC
	/*
	 * On UniNorth, power down the cable and turn off the chip
	 * clock to save power on laptops.
	 */
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int err;

	software_reset(ohci);
	free_irq(pdev->irq, ohci);
	err = pci_save_state(pdev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);

/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}

static int pci_resume(struct pci_dev *pdev)
{
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int err;

/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}
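
	/*
	 * The controller comes back from D3 with its registers at reset
	 * defaults, so go through the full ohci_enable() initialization
	 * again instead of restoring individual registers.
	 */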
	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);