1 /*
2  * Driver for OHCI 1394 controllers
3  *
4  * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20
21 #include <linux/compiler.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/firewire.h>
26 #include <linux/firewire-constants.h>
27 #include <linux/gfp.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/io.h>
31 #include <linux/kernel.h>
32 #include <linux/list.h>
33 #include <linux/mm.h>
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/pci.h>
37 #include <linux/pci_ids.h>
38 #include <linux/spinlock.h>
39 #include <linux/string.h>
40
41 #include <asm/atomic.h>
42 #include <asm/byteorder.h>
43 #include <asm/page.h>
44 #include <asm/system.h>
45
46 #ifdef CONFIG_PPC_PMAC
47 #include <asm/pmac_feature.h>
48 #endif
49
50 #include "core.h"
51 #include "ohci.h"
52
53 #define DESCRIPTOR_OUTPUT_MORE          0
54 #define DESCRIPTOR_OUTPUT_LAST          (1 << 12)
55 #define DESCRIPTOR_INPUT_MORE           (2 << 12)
56 #define DESCRIPTOR_INPUT_LAST           (3 << 12)
57 #define DESCRIPTOR_STATUS               (1 << 11)
58 #define DESCRIPTOR_KEY_IMMEDIATE        (2 << 8)
59 #define DESCRIPTOR_PING                 (1 << 7)
60 #define DESCRIPTOR_YY                   (1 << 6)
61 #define DESCRIPTOR_NO_IRQ               (0 << 4)
62 #define DESCRIPTOR_IRQ_ERROR            (1 << 4)
63 #define DESCRIPTOR_IRQ_ALWAYS           (3 << 4)
64 #define DESCRIPTOR_BRANCH_ALWAYS        (3 << 2)
65 #define DESCRIPTOR_WAIT                 (3 << 0)
66
67 struct descriptor {
68         __le16 req_count;
69         __le16 control;
70         __le32 data_address;
71         __le32 branch_address;
72         __le16 res_count;
73         __le16 transfer_status;
74 } __attribute__((aligned(16)));
75
76 struct db_descriptor {
77         __le16 first_size;
78         __le16 control;
79         __le16 second_req_count;
80         __le16 first_req_count;
81         __le32 branch_address;
82         __le16 second_res_count;
83         __le16 first_res_count;
84         __le32 reserved0;
85         __le32 first_buffer;
86         __le32 second_buffer;
87         __le32 reserved1;
88 } __attribute__((aligned(16)));
89
90 #define CONTROL_SET(regs)       (regs)
91 #define CONTROL_CLEAR(regs)     ((regs) + 4)
92 #define COMMAND_PTR(regs)       ((regs) + 12)
93 #define CONTEXT_MATCH(regs)     ((regs) + 16)
94
95 struct ar_buffer {
96         struct descriptor descriptor;
97         struct ar_buffer *next;
98         __le32 data[0];
99 };
100
101 struct ar_context {
102         struct fw_ohci *ohci;
103         struct ar_buffer *current_buffer;
104         struct ar_buffer *last_buffer;
105         void *pointer;
106         u32 regs;
107         struct tasklet_struct tasklet;
108 };
109
110 struct context;
111
112 typedef int (*descriptor_callback_t)(struct context *ctx,
113                                      struct descriptor *d,
114                                      struct descriptor *last);
115
116 /*
117  * A buffer that contains a block of DMA-able coherent memory used for
118  * storing a portion of a DMA descriptor program.
119  */
120 struct descriptor_buffer {
121         struct list_head list;
122         dma_addr_t buffer_bus;
123         size_t buffer_size;
124         size_t used;
125         struct descriptor buffer[0];
126 };
127
128 struct context {
129         struct fw_ohci *ohci;
130         u32 regs;
131         int total_allocation;
132
133         /*
134          * List of page-sized buffers for storing DMA descriptors.
135          * Head of list contains buffers in use and tail of list contains
136          * free buffers.
137          */
138         struct list_head buffer_list;
139
140         /*
141          * Pointer to a buffer inside buffer_list that contains the tail
142          * end of the current DMA program.
143          */
144         struct descriptor_buffer *buffer_tail;
145
146         /*
147          * The descriptor containing the branch address of the first
148          * descriptor that has not yet been filled by the device.
149          */
150         struct descriptor *last;
151
152         /*
153          * The last descriptor in the DMA program.  It contains the branch
154          * address that must be updated upon appending a new descriptor.
155          */
156         struct descriptor *prev;
157
158         descriptor_callback_t callback;
159
160         struct tasklet_struct tasklet;
161 };
162
163 #define IT_HEADER_SY(v)          ((v) <<  0)
164 #define IT_HEADER_TCODE(v)       ((v) <<  4)
165 #define IT_HEADER_CHANNEL(v)     ((v) <<  8)
166 #define IT_HEADER_TAG(v)         ((v) << 14)
167 #define IT_HEADER_SPEED(v)       ((v) << 16)
168 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
169
170 struct iso_context {
171         struct fw_iso_context base;
172         struct context context;
173         int excess_bytes;
174         void *header;
175         size_t header_length;
176 };
177
178 #define CONFIG_ROM_SIZE 1024
179
180 struct fw_ohci {
181         struct fw_card card;
182
183         __iomem char *registers;
184         dma_addr_t self_id_bus;
185         __le32 *self_id_cpu;
186         struct tasklet_struct bus_reset_tasklet;
187         int node_id;
188         int generation;
189         int request_generation; /* for timestamping incoming requests */
190         atomic_t bus_seconds;
191
192         bool use_dualbuffer;
193         bool old_uninorth;
194         bool bus_reset_packet_quirk;
195         bool iso_cycle_timer_quirk;
196
197         /*
198          * Spinlock for accessing fw_ohci data.  Never call out of
199          * this driver with this lock held.
200          */
201         spinlock_t lock;
202         u32 self_id_buffer[512];
203
204         /* Config rom buffers */
205         __be32 *config_rom;
206         dma_addr_t config_rom_bus;
207         __be32 *next_config_rom;
208         dma_addr_t next_config_rom_bus;
209         __be32 next_header;
210
211         struct ar_context ar_request_ctx;
212         struct ar_context ar_response_ctx;
213         struct context at_request_ctx;
214         struct context at_response_ctx;
215
216         u32 it_context_mask;
217         struct iso_context *it_context_list;
218         u64 ir_context_channels;
219         u32 ir_context_mask;
220         struct iso_context *ir_context_list;
221 };
222
223 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
224 {
225         return container_of(card, struct fw_ohci, card);
226 }
227
228 #define IT_CONTEXT_CYCLE_MATCH_ENABLE   0x80000000
229 #define IR_CONTEXT_BUFFER_FILL          0x80000000
230 #define IR_CONTEXT_ISOCH_HEADER         0x40000000
231 #define IR_CONTEXT_CYCLE_MATCH_ENABLE   0x20000000
232 #define IR_CONTEXT_MULTI_CHANNEL_MODE   0x10000000
233 #define IR_CONTEXT_DUAL_BUFFER_MODE     0x08000000
234
235 #define CONTEXT_RUN     0x8000
236 #define CONTEXT_WAKE    0x1000
237 #define CONTEXT_DEAD    0x0800
238 #define CONTEXT_ACTIVE  0x0400
239
240 #define OHCI1394_MAX_AT_REQ_RETRIES     0xf
241 #define OHCI1394_MAX_AT_RESP_RETRIES    0x2
242 #define OHCI1394_MAX_PHYS_RESP_RETRIES  0x8
243
244 #define OHCI1394_REGISTER_SIZE          0x800
245 #define OHCI_LOOP_COUNT                 500
246 #define OHCI1394_PCI_HCI_Control        0x40
247 #define SELF_ID_BUF_SIZE                0x800
248 #define OHCI_TCODE_PHY_PACKET           0x0e
249 #define OHCI_VERSION_1_1                0x010010
250
251 static char ohci_driver_name[] = KBUILD_MODNAME;
252
253 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
254
255 #define OHCI_PARAM_DEBUG_AT_AR          1
256 #define OHCI_PARAM_DEBUG_SELFIDS        2
257 #define OHCI_PARAM_DEBUG_IRQS           4
258 #define OHCI_PARAM_DEBUG_BUSRESETS      8 /* only effective before chip init */
259
260 static int param_debug;
261 module_param_named(debug, param_debug, int, 0644);
262 MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
263         ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
264         ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
265         ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
266         ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
267         ", or a combination, or all = -1)");
268
269 static void log_irqs(u32 evt)
270 {
271         if (likely(!(param_debug &
272                         (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
273                 return;
274
275         if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
276             !(evt & OHCI1394_busReset))
277                 return;
278
279         fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
280             evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
281             evt & OHCI1394_RQPkt                ? " AR_req"             : "",
282             evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
283             evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
284             evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
285             evt & OHCI1394_isochRx              ? " IR"                 : "",
286             evt & OHCI1394_isochTx              ? " IT"                 : "",
287             evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
288             evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
289             evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
290             evt & OHCI1394_cycleInconsistent    ? " cycleInconsistent"  : "",
291             evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
292             evt & OHCI1394_busReset             ? " busReset"           : "",
293             evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
294                     OHCI1394_RSPkt | OHCI1394_reqTxComplete |
295                     OHCI1394_respTxComplete | OHCI1394_isochRx |
296                     OHCI1394_isochTx | OHCI1394_postedWriteErr |
297                     OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
298                     OHCI1394_cycleInconsistent |
299                     OHCI1394_regAccessFail | OHCI1394_busReset)
300                                                 ? " ?"                  : "");
301 }
302
303 static const char *speed[] = {
304         [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
305 };
306 static const char *power[] = {
307         [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
308         [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
309 };
310 static const char port[] = { '.', '-', 'p', 'c', };
311
312 static char _p(u32 *s, int shift)
313 {
314         return port[*s >> shift & 3];
315 }
316
317 static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
318 {
319         if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
320                 return;
321
322         fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
323                   self_id_count, generation, node_id);
324
325         for (; self_id_count--; ++s)
326                 if ((*s & 1 << 23) == 0)
327                         fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
328                             "%s gc=%d %s %s%s%s\n",
329                             *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
330                             speed[*s >> 14 & 3], *s >> 16 & 63,
331                             power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
332                             *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
333                 else
334                         fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
335                             *s, *s >> 24 & 63,
336                             _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
337                             _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
338 }
339
340 static const char *evts[] = {
341         [0x00] = "evt_no_status",       [0x01] = "-reserved-",
342         [0x02] = "evt_long_packet",     [0x03] = "evt_missing_ack",
343         [0x04] = "evt_underrun",        [0x05] = "evt_overrun",
344         [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
345         [0x08] = "evt_data_write",      [0x09] = "evt_bus_reset",
346         [0x0a] = "evt_timeout",         [0x0b] = "evt_tcode_err",
347         [0x0c] = "-reserved-",          [0x0d] = "-reserved-",
348         [0x0e] = "evt_unknown",         [0x0f] = "evt_flushed",
349         [0x10] = "-reserved-",          [0x11] = "ack_complete",
350         [0x12] = "ack_pending ",        [0x13] = "-reserved-",
351         [0x14] = "ack_busy_X",          [0x15] = "ack_busy_A",
352         [0x16] = "ack_busy_B",          [0x17] = "-reserved-",
353         [0x18] = "-reserved-",          [0x19] = "-reserved-",
354         [0x1a] = "-reserved-",          [0x1b] = "ack_tardy",
355         [0x1c] = "-reserved-",          [0x1d] = "ack_data_error",
356         [0x1e] = "ack_type_error",      [0x1f] = "-reserved-",
357         [0x20] = "pending/cancelled",
358 };
359 static const char *tcodes[] = {
360         [0x0] = "QW req",               [0x1] = "BW req",
361         [0x2] = "W resp",               [0x3] = "-reserved-",
362         [0x4] = "QR req",               [0x5] = "BR req",
363         [0x6] = "QR resp",              [0x7] = "BR resp",
364         [0x8] = "cycle start",          [0x9] = "Lk req",
365         [0xa] = "async stream packet",  [0xb] = "Lk resp",
366         [0xc] = "-reserved-",           [0xd] = "-reserved-",
367         [0xe] = "link internal",        [0xf] = "-reserved-",
368 };
369 static const char *phys[] = {
370         [0x0] = "phy config packet",    [0x1] = "link-on packet",
371         [0x2] = "self-id packet",       [0x3] = "-reserved-",
372 };
373
374 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
375 {
376         int tcode = header[0] >> 4 & 0xf;
377         char specific[12];
378
379         if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
380                 return;
381
382         if (unlikely(evt >= ARRAY_SIZE(evts)))
383                         evt = 0x1f;
384
385         if (evt == OHCI1394_evt_bus_reset) {
386                 fw_notify("A%c evt_bus_reset, generation %d\n",
387                     dir, (header[2] >> 16) & 0xff);
388                 return;
389         }
390
391         if (header[0] == ~header[1]) {
392                 fw_notify("A%c %s, %s, %08x\n",
393                     dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
394                 return;
395         }
396
397         switch (tcode) {
398         case 0x0: case 0x6: case 0x8:
399                 snprintf(specific, sizeof(specific), " = %08x",
400                          be32_to_cpu((__force __be32)header[3]));
401                 break;
402         case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
403                 snprintf(specific, sizeof(specific), " %x,%x",
404                          header[3] >> 16, header[3] & 0xffff);
405                 break;
406         default:
407                 specific[0] = '\0';
408         }
409
410         switch (tcode) {
411         case 0xe: case 0xa:
412                 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
413                 break;
414         case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
415                 fw_notify("A%c spd %x tl %02x, "
416                     "%04x -> %04x, %s, "
417                     "%s, %04x%08x%s\n",
418                     dir, speed, header[0] >> 10 & 0x3f,
419                     header[1] >> 16, header[0] >> 16, evts[evt],
420                     tcodes[tcode], header[1] & 0xffff, header[2], specific);
421                 break;
422         default:
423                 fw_notify("A%c spd %x tl %02x, "
424                     "%04x -> %04x, %s, "
425                     "%s%s\n",
426                     dir, speed, header[0] >> 10 & 0x3f,
427                     header[1] >> 16, header[0] >> 16, evts[evt],
428                     tcodes[tcode], specific);
429         }
430 }
431
432 #else
433
434 #define log_irqs(evt)
435 #define log_selfids(node_id, generation, self_id_count, sid)
436 #define log_ar_at_event(dir, speed, header, evt)
437
438 #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
439
440 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
441 {
442         writel(data, ohci->registers + offset);
443 }
444
445 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
446 {
447         return readl(ohci->registers + offset);
448 }
449
450 static inline void flush_writes(const struct fw_ohci *ohci)
451 {
452         /* Do a dummy read to flush writes. */
453         reg_read(ohci, OHCI1394_Version);
454 }
455
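/*
 * Read-modify-write of a PHY register through OHCI1394_PhyControl: start
 * a read of the register at 'addr', give the PHY a moment to respond and
 * check ReadDone, then write the value back with clear_bits cleared and
 * set_bits set.
 */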
456 static int ohci_update_phy_reg(struct fw_card *card, int addr,
457                                int clear_bits, int set_bits)
458 {
459         struct fw_ohci *ohci = fw_ohci(card);
460         u32 val, old;
461
462         reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
463         flush_writes(ohci);
464         msleep(2);
465         val = reg_read(ohci, OHCI1394_PhyControl);
466         if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
467                 fw_error("failed to set phy reg bits.\n");
468                 return -EBUSY;
469         }
470
471         old = OHCI1394_PhyControl_ReadData(val);
472         old = (old & ~clear_bits) | set_bits;
473         reg_write(ohci, OHCI1394_PhyControl,
474                   OHCI1394_PhyControl_Write(addr, old));
475
476         return 0;
477 }
478
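/*
 * Allocate one more page for asynchronous receive: the page holds its own
 * INPUT_MORE descriptor followed by the data area.  Chain it behind the
 * current last buffer and wake the context so the controller can use it.
 */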
479 static int ar_context_add_page(struct ar_context *ctx)
480 {
481         struct device *dev = ctx->ohci->card.device;
482         struct ar_buffer *ab;
483         dma_addr_t uninitialized_var(ab_bus);
484         size_t offset;
485
486         ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
487         if (ab == NULL)
488                 return -ENOMEM;
489
490         ab->next = NULL;
491         memset(&ab->descriptor, 0, sizeof(ab->descriptor));
492         ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
493                                                     DESCRIPTOR_STATUS |
494                                                     DESCRIPTOR_BRANCH_ALWAYS);
495         offset = offsetof(struct ar_buffer, data);
496         ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
497         ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
498         ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
499         ab->descriptor.branch_address = 0;
500
501         ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
502         ctx->last_buffer->next = ab;
503         ctx->last_buffer = ab;
504
505         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
506         flush_writes(ctx->ohci);
507
508         return 0;
509 }
510
511 static void ar_context_release(struct ar_context *ctx)
512 {
513         struct ar_buffer *ab, *ab_next;
514         size_t offset;
515         dma_addr_t ab_bus;
516
517         for (ab = ctx->current_buffer; ab; ab = ab_next) {
518                 ab_next = ab->next;
519                 offset = offsetof(struct ar_buffer, data);
520                 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
521                 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
522                                   ab, ab_bus);
523         }
524 }
525
526 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
527 #define cond_le32_to_cpu(v) \
528         (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
529 #else
530 #define cond_le32_to_cpu(v) le32_to_cpu(v)
531 #endif
532
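/*
 * Parse one received packet out of the AR buffer: decode the header
 * according to its tcode, pick up the trailing status word (event code,
 * speed, timestamp), and pass the packet to the core as a request or a
 * response.  Returns a pointer just past the packet, i.e. the start of
 * the next packet in the buffer.
 */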
533 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
534 {
535         struct fw_ohci *ohci = ctx->ohci;
536         struct fw_packet p;
537         u32 status, length, tcode;
538         int evt;
539
540         p.header[0] = cond_le32_to_cpu(buffer[0]);
541         p.header[1] = cond_le32_to_cpu(buffer[1]);
542         p.header[2] = cond_le32_to_cpu(buffer[2]);
543
544         tcode = (p.header[0] >> 4) & 0x0f;
545         switch (tcode) {
546         case TCODE_WRITE_QUADLET_REQUEST:
547         case TCODE_READ_QUADLET_RESPONSE:
548                 p.header[3] = (__force __u32) buffer[3];
549                 p.header_length = 16;
550                 p.payload_length = 0;
551                 break;
552
553         case TCODE_READ_BLOCK_REQUEST :
554                 p.header[3] = cond_le32_to_cpu(buffer[3]);
555                 p.header_length = 16;
556                 p.payload_length = 0;
557                 break;
558
559         case TCODE_WRITE_BLOCK_REQUEST:
560         case TCODE_READ_BLOCK_RESPONSE:
561         case TCODE_LOCK_REQUEST:
562         case TCODE_LOCK_RESPONSE:
563                 p.header[3] = cond_le32_to_cpu(buffer[3]);
564                 p.header_length = 16;
565                 p.payload_length = p.header[3] >> 16;
566                 break;
567
568         case TCODE_WRITE_RESPONSE:
569         case TCODE_READ_QUADLET_REQUEST:
570         case OHCI_TCODE_PHY_PACKET:
571                 p.header_length = 12;
572                 p.payload_length = 0;
573                 break;
574
575         default:
576                 /* FIXME: Stop context, discard everything, and restart? */
577                 p.header_length = 0;
578                 p.payload_length = 0;
579         }
580
581         p.payload = (void *) buffer + p.header_length;
582
583         /* FIXME: What to do about evt_* errors? */
584         length = (p.header_length + p.payload_length + 3) / 4;
585         status = cond_le32_to_cpu(buffer[length]);
586         evt    = (status >> 16) & 0x1f;
587
588         p.ack        = evt - 16;
589         p.speed      = (status >> 21) & 0x7;
590         p.timestamp  = status & 0xffff;
591         p.generation = ohci->request_generation;
592
593         log_ar_at_event('R', p.speed, p.header, evt);
594
595         /*
596          * The OHCI bus reset handler synthesizes a phy packet with
597          * the new generation number when a bus reset happens (see
598          * section 8.4.2.3).  This helps us determine when a request
599          * was received and make sure we send the response in the same
600          * generation.  We only need this for requests; for responses
601          * we use the unique tlabel for finding the matching
602          * request.
603          *
604          * Alas some chips sometimes emit bus reset packets with a
605          * wrong generation.  We set the correct generation for these
606          * at a slightly incorrect time (in bus_reset_tasklet).
607          */
608         if (evt == OHCI1394_evt_bus_reset) {
609                 if (!ohci->bus_reset_packet_quirk)
610                         ohci->request_generation = (p.header[2] >> 16) & 0xff;
611         } else if (ctx == &ohci->ar_request_ctx) {
612                 fw_core_handle_request(&ohci->card, &p);
613         } else {
614                 fw_core_handle_response(&ohci->card, &p);
615         }
616
617         return buffer + length + 1;
618 }
619
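/*
 * Bottom half for the AR contexts.  If the current buffer has been filled
 * completely, a packet may be split across this page and the next one; in
 * that case the finished page is reused to reassemble the split packet,
 * all packets in it are handled, and the page is then freed and replaced
 * by a fresh one.  Otherwise, simply handle the packets received so far.
 */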
620 static void ar_context_tasklet(unsigned long data)
621 {
622         struct ar_context *ctx = (struct ar_context *)data;
623         struct fw_ohci *ohci = ctx->ohci;
624         struct ar_buffer *ab;
625         struct descriptor *d;
626         void *buffer, *end;
627
628         ab = ctx->current_buffer;
629         d = &ab->descriptor;
630
631         if (d->res_count == 0) {
632                 size_t size, rest, offset;
633                 dma_addr_t start_bus;
634                 void *start;
635
636                 /*
637                  * This descriptor is finished and we may have a
638                  * packet split across this and the next buffer. We
639                  * reuse the page for reassembling the split packet.
640                  */
641
642                 offset = offsetof(struct ar_buffer, data);
643                 start = buffer = ab;
644                 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
645
646                 ab = ab->next;
647                 d = &ab->descriptor;
648                 size = buffer + PAGE_SIZE - ctx->pointer;
649                 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
650                 memmove(buffer, ctx->pointer, size);
651                 memcpy(buffer + size, ab->data, rest);
652                 ctx->current_buffer = ab;
653                 ctx->pointer = (void *) ab->data + rest;
654                 end = buffer + size + rest;
655
656                 while (buffer < end)
657                         buffer = handle_ar_packet(ctx, buffer);
658
659                 dma_free_coherent(ohci->card.device, PAGE_SIZE,
660                                   start, start_bus);
661                 ar_context_add_page(ctx);
662         } else {
663                 buffer = ctx->pointer;
664                 ctx->pointer = end =
665                         (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
666
667                 while (buffer < end)
668                         buffer = handle_ar_packet(ctx, buffer);
669         }
670 }
671
672 static int ar_context_init(struct ar_context *ctx,
673                            struct fw_ohci *ohci, u32 regs)
674 {
675         struct ar_buffer ab;
676
677         ctx->regs        = regs;
678         ctx->ohci        = ohci;
679         ctx->last_buffer = &ab;
680         tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
681
682         ar_context_add_page(ctx);
683         ar_context_add_page(ctx);
684         ctx->current_buffer = ab.next;
685         ctx->pointer = ctx->current_buffer->data;
686
687         return 0;
688 }
689
690 static void ar_context_run(struct ar_context *ctx)
691 {
692         struct ar_buffer *ab = ctx->current_buffer;
693         dma_addr_t ab_bus;
694         size_t offset;
695
696         offset = offsetof(struct ar_buffer, data);
697         ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
698
699         reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
700         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
701         flush_writes(ctx->ohci);
702 }
703
704 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
705 {
706         int b, key;
707
708         b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
709         key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
710
711         /* figure out which descriptor the branch address goes in */
712         if (z == 2 && (b == 3 || key == 2))
713                 return d;
714         else
715                 return d + z - 1;
716 }
717
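/*
 * Bottom half shared by all descriptor-list contexts: starting from
 * ctx->last, follow the branch addresses through the DMA program and call
 * the context's callback for each completed descriptor block.  Descriptor
 * buffers that have been fully consumed are moved back to the free tail
 * of buffer_list.
 */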
718 static void context_tasklet(unsigned long data)
719 {
720         struct context *ctx = (struct context *) data;
721         struct descriptor *d, *last;
722         u32 address;
723         int z;
724         struct descriptor_buffer *desc;
725
726         desc = list_entry(ctx->buffer_list.next,
727                         struct descriptor_buffer, list);
728         last = ctx->last;
729         while (last->branch_address != 0) {
730                 struct descriptor_buffer *old_desc = desc;
731                 address = le32_to_cpu(last->branch_address);
732                 z = address & 0xf;
733                 address &= ~0xf;
734
735                 /* If the branch address points to a buffer outside of the
736                  * current buffer, advance to the next buffer. */
737                 if (address < desc->buffer_bus ||
738                                 address >= desc->buffer_bus + desc->used)
739                         desc = list_entry(desc->list.next,
740                                         struct descriptor_buffer, list);
741                 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
742                 last = find_branch_descriptor(d, z);
743
744                 if (!ctx->callback(ctx, d, last))
745                         break;
746
747                 if (old_desc != desc) {
748                         /* If we've advanced to the next buffer, move the
749                          * previous buffer to the free list. */
750                         unsigned long flags;
751                         old_desc->used = 0;
752                         spin_lock_irqsave(&ctx->ohci->lock, flags);
753                         list_move_tail(&old_desc->list, &ctx->buffer_list);
754                         spin_unlock_irqrestore(&ctx->ohci->lock, flags);
755                 }
756                 ctx->last = last;
757         }
758 }
759
760 /*
761  * Allocate a new buffer and add it to the list of free buffers for this
762  * context.  Must be called with ohci->lock held.
763  */
764 static int context_add_buffer(struct context *ctx)
765 {
766         struct descriptor_buffer *desc;
767         dma_addr_t uninitialized_var(bus_addr);
768         int offset;
769
770         /*
771          * 16MB of descriptors should be far more than enough for any DMA
772          * program.  This will catch run-away userspace or DoS attacks.
773          */
774         if (ctx->total_allocation >= 16*1024*1024)
775                 return -ENOMEM;
776
777         desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
778                         &bus_addr, GFP_ATOMIC);
779         if (!desc)
780                 return -ENOMEM;
781
782         offset = (void *)&desc->buffer - (void *)desc;
783         desc->buffer_size = PAGE_SIZE - offset;
784         desc->buffer_bus = bus_addr + offset;
785         desc->used = 0;
786
787         list_add_tail(&desc->list, &ctx->buffer_list);
788         ctx->total_allocation += PAGE_SIZE;
789
790         return 0;
791 }
792
793 static int context_init(struct context *ctx, struct fw_ohci *ohci,
794                         u32 regs, descriptor_callback_t callback)
795 {
796         ctx->ohci = ohci;
797         ctx->regs = regs;
798         ctx->total_allocation = 0;
799
800         INIT_LIST_HEAD(&ctx->buffer_list);
801         if (context_add_buffer(ctx) < 0)
802                 return -ENOMEM;
803
804         ctx->buffer_tail = list_entry(ctx->buffer_list.next,
805                         struct descriptor_buffer, list);
806
807         tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
808         ctx->callback = callback;
809
810         /*
811          * We put a dummy descriptor in the buffer that has a NULL
812          * branch address and looks like it's been sent.  That way we
813          * have a descriptor to append DMA programs to.
814          */
815         memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
816         ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
817         ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
818         ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
819         ctx->last = ctx->buffer_tail->buffer;
820         ctx->prev = ctx->buffer_tail->buffer;
821
822         return 0;
823 }
824
825 static void context_release(struct context *ctx)
826 {
827         struct fw_card *card = &ctx->ohci->card;
828         struct descriptor_buffer *desc, *tmp;
829
830         list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
831                 dma_free_coherent(card->device, PAGE_SIZE, desc,
832                         desc->buffer_bus -
833                         ((void *)&desc->buffer - (void *)desc));
834 }
835
836 /* Must be called with ohci->lock held */
837 static struct descriptor *context_get_descriptors(struct context *ctx,
838                                                   int z, dma_addr_t *d_bus)
839 {
840         struct descriptor *d = NULL;
841         struct descriptor_buffer *desc = ctx->buffer_tail;
842
843         if (z * sizeof(*d) > desc->buffer_size)
844                 return NULL;
845
846         if (z * sizeof(*d) > desc->buffer_size - desc->used) {
847                 /* No room for the descriptor in this buffer, so advance to the
848                  * next one. */
849
850                 if (desc->list.next == &ctx->buffer_list) {
851                         /* If there is no free buffer next in the list,
852                          * allocate one. */
853                         if (context_add_buffer(ctx) < 0)
854                                 return NULL;
855                 }
856                 desc = list_entry(desc->list.next,
857                                 struct descriptor_buffer, list);
858                 ctx->buffer_tail = desc;
859         }
860
861         d = desc->buffer + desc->used / sizeof(*d);
862         memset(d, 0, z * sizeof(*d));
863         *d_bus = desc->buffer_bus + desc->used;
864
865         return d;
866 }
867
868 static void context_run(struct context *ctx, u32 extra)
869 {
870         struct fw_ohci *ohci = ctx->ohci;
871
872         reg_write(ohci, COMMAND_PTR(ctx->regs),
873                   le32_to_cpu(ctx->last->branch_address));
874         reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
875         reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
876         flush_writes(ohci);
877 }
878
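/*
 * Link a block of z descriptors (previously obtained from
 * context_get_descriptors) into the DMA program: account for the space it
 * occupies, point the old tail descriptor's branch address at it, and
 * wake the context.
 */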
879 static void context_append(struct context *ctx,
880                            struct descriptor *d, int z, int extra)
881 {
882         dma_addr_t d_bus;
883         struct descriptor_buffer *desc = ctx->buffer_tail;
884
885         d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
886
887         desc->used += (z + extra) * sizeof(*d);
888         ctx->prev->branch_address = cpu_to_le32(d_bus | z);
889         ctx->prev = find_branch_descriptor(d, z);
890
891         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
892         flush_writes(ctx->ohci);
893 }
894
895 static void context_stop(struct context *ctx)
896 {
897         u32 reg;
898         int i;
899
900         reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
901         flush_writes(ctx->ohci);
902
903         for (i = 0; i < 10; i++) {
904                 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
905                 if ((reg & CONTEXT_ACTIVE) == 0)
906                         return;
907
908                 mdelay(1);
909         }
910         fw_error("Error: DMA context still active (0x%08x)\n", reg);
911 }
912
913 struct driver_data {
914         struct fw_packet *packet;
915 };
916
917 /*
918  * This function appends a packet to the DMA queue for transmission.
919  * Must always be called with the ohci->lock held to ensure proper
920  * generation handling and locking around packet queue manipulation.
921  */
922 static int at_context_queue_packet(struct context *ctx,
923                                    struct fw_packet *packet)
924 {
925         struct fw_ohci *ohci = ctx->ohci;
926         dma_addr_t d_bus, uninitialized_var(payload_bus);
927         struct driver_data *driver_data;
928         struct descriptor *d, *last;
929         __le32 *header;
930         int z, tcode;
931         u32 reg;
932
933         d = context_get_descriptors(ctx, 4, &d_bus);
934         if (d == NULL) {
935                 packet->ack = RCODE_SEND_ERROR;
936                 return -1;
937         }
938
939         d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
940         d[0].res_count = cpu_to_le16(packet->timestamp);
941
942         /*
943          * The DMA format for asynchronous link packets is different
944          * from the IEEE1394 layout, so shift the fields around
945          * accordingly.  If header_length is 8, it's a PHY packet, to
946          * which we need to prepend an extra quadlet.
947          */
948
949         header = (__le32 *) &d[1];
950         switch (packet->header_length) {
951         case 16:
952         case 12:
953                 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
954                                         (packet->speed << 16));
955                 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
956                                         (packet->header[0] & 0xffff0000));
957                 header[2] = cpu_to_le32(packet->header[2]);
958
959                 tcode = (packet->header[0] >> 4) & 0x0f;
960                 if (TCODE_IS_BLOCK_PACKET(tcode))
961                         header[3] = cpu_to_le32(packet->header[3]);
962                 else
963                         header[3] = (__force __le32) packet->header[3];
964
965                 d[0].req_count = cpu_to_le16(packet->header_length);
966                 break;
967
968         case 8:
969                 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
970                                         (packet->speed << 16));
971                 header[1] = cpu_to_le32(packet->header[0]);
972                 header[2] = cpu_to_le32(packet->header[1]);
973                 d[0].req_count = cpu_to_le16(12);
974                 break;
975
976         case 4:
977                 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
978                                         (packet->speed << 16));
979                 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
980                 d[0].req_count = cpu_to_le16(8);
981                 break;
982
983         default:
984                 /* BUG(); */
985                 packet->ack = RCODE_SEND_ERROR;
986                 return -1;
987         }
988
989         driver_data = (struct driver_data *) &d[3];
990         driver_data->packet = packet;
991         packet->driver_data = driver_data;
992
993         if (packet->payload_length > 0) {
994                 payload_bus =
995                         dma_map_single(ohci->card.device, packet->payload,
996                                        packet->payload_length, DMA_TO_DEVICE);
997                 if (dma_mapping_error(ohci->card.device, payload_bus)) {
998                         packet->ack = RCODE_SEND_ERROR;
999                         return -1;
1000                 }
1001                 packet->payload_bus     = payload_bus;
1002                 packet->payload_mapped  = true;
1003
1004                 d[2].req_count    = cpu_to_le16(packet->payload_length);
1005                 d[2].data_address = cpu_to_le32(payload_bus);
1006                 last = &d[2];
1007                 z = 3;
1008         } else {
1009                 last = &d[0];
1010                 z = 2;
1011         }
1012
1013         last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1014                                      DESCRIPTOR_IRQ_ALWAYS |
1015                                      DESCRIPTOR_BRANCH_ALWAYS);
1016
1017         /*
1018          * If the controller and packet generations don't match, we need to
1019          * bail out and try again.  If IntEvent.busReset is set, the AT context
1020          * is halted, so appending to the context and trying to run it is
1021          * futile.  Most controllers do the right thing and just flush the AT
1022          * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
1023          * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
1024          * up stalling out.  So we just bail out in software and try again
1025          * later, and everyone is happy.
1026          * FIXME: Document how the locking works.
1027          */
1028         if (ohci->generation != packet->generation ||
1029             reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
1030                 if (packet->payload_mapped)
1031                         dma_unmap_single(ohci->card.device, payload_bus,
1032                                          packet->payload_length, DMA_TO_DEVICE);
1033                 packet->ack = RCODE_GENERATION;
1034                 return -1;
1035         }
1036
1037         context_append(ctx, d, z, 4 - z);
1038
1039         /* If the context isn't already running, start it up. */
1040         reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1041         if ((reg & CONTEXT_RUN) == 0)
1042                 context_run(ctx, 0);
1043
1044         return 0;
1045 }
1046
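/*
 * Descriptor callback for the AT contexts: once the transmit descriptor
 * block has completed, unmap the payload, translate the OHCI event code
 * into an ack or rcode, and run the packet's completion callback.
 */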
1047 static int handle_at_packet(struct context *context,
1048                             struct descriptor *d,
1049                             struct descriptor *last)
1050 {
1051         struct driver_data *driver_data;
1052         struct fw_packet *packet;
1053         struct fw_ohci *ohci = context->ohci;
1054         int evt;
1055
1056         if (last->transfer_status == 0)
1057                 /* This descriptor isn't done yet, stop iteration. */
1058                 return 0;
1059
1060         driver_data = (struct driver_data *) &d[3];
1061         packet = driver_data->packet;
1062         if (packet == NULL)
1063                 /* This packet was cancelled, just continue. */
1064                 return 1;
1065
1066         if (packet->payload_mapped)
1067                 dma_unmap_single(ohci->card.device, packet->payload_bus,
1068                                  packet->payload_length, DMA_TO_DEVICE);
1069
1070         evt = le16_to_cpu(last->transfer_status) & 0x1f;
1071         packet->timestamp = le16_to_cpu(last->res_count);
1072
1073         log_ar_at_event('T', packet->speed, packet->header, evt);
1074
1075         switch (evt) {
1076         case OHCI1394_evt_timeout:
1077                 /* Async response transmit timed out. */
1078                 packet->ack = RCODE_CANCELLED;
1079                 break;
1080
1081         case OHCI1394_evt_flushed:
1082                 /*
1083                  * The packet was flushed; this should give the same
1084                  * error as when we try to use a stale generation count.
1085                  */
1086                 packet->ack = RCODE_GENERATION;
1087                 break;
1088
1089         case OHCI1394_evt_missing_ack:
1090                 /*
1091                  * Using a valid (current) generation count, but the
1092                  * node is not on the bus or not sending acks.
1093                  */
1094                 packet->ack = RCODE_NO_ACK;
1095                 break;
1096
1097         case ACK_COMPLETE + 0x10:
1098         case ACK_PENDING + 0x10:
1099         case ACK_BUSY_X + 0x10:
1100         case ACK_BUSY_A + 0x10:
1101         case ACK_BUSY_B + 0x10:
1102         case ACK_DATA_ERROR + 0x10:
1103         case ACK_TYPE_ERROR + 0x10:
1104                 packet->ack = evt - 0x10;
1105                 break;
1106
1107         default:
1108                 packet->ack = RCODE_SEND_ERROR;
1109                 break;
1110         }
1111
1112         packet->callback(packet, &ohci->card, packet->ack);
1113
1114         return 1;
1115 }
1116
1117 #define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)
1118 #define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
1119 #define HEADER_GET_OFFSET_HIGH(q)       (((q) >> 0) & 0xffff)
1120 #define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
1121 #define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)
1122
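/*
 * Serve a request addressed to the local node's config ROM from the
 * in-memory copy: read requests get the requested range, anything else
 * is answered with an address or type error.
 */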
1123 static void handle_local_rom(struct fw_ohci *ohci,
1124                              struct fw_packet *packet, u32 csr)
1125 {
1126         struct fw_packet response;
1127         int tcode, length, i;
1128
1129         tcode = HEADER_GET_TCODE(packet->header[0]);
1130         if (TCODE_IS_BLOCK_PACKET(tcode))
1131                 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1132         else
1133                 length = 4;
1134
1135         i = csr - CSR_CONFIG_ROM;
1136         if (i + length > CONFIG_ROM_SIZE) {
1137                 fw_fill_response(&response, packet->header,
1138                                  RCODE_ADDRESS_ERROR, NULL, 0);
1139         } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1140                 fw_fill_response(&response, packet->header,
1141                                  RCODE_TYPE_ERROR, NULL, 0);
1142         } else {
1143                 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1144                                  (void *) ohci->config_rom + i, length);
1145         }
1146
1147         fw_core_handle_response(&ohci->card, &response);
1148 }
1149
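/*
 * Serve a compare-swap lock request (or quadlet read) on the serial bus
 * registers addressed to the local node, using the controller's CSR
 * compare-swap unit (CSRData/CSRCompareData/CSRControl).
 */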
1150 static void handle_local_lock(struct fw_ohci *ohci,
1151                               struct fw_packet *packet, u32 csr)
1152 {
1153         struct fw_packet response;
1154         int tcode, length, ext_tcode, sel;
1155         __be32 *payload, lock_old = cpu_to_be32(0);
1156         u32 lock_arg, lock_data;
1157
1158         tcode = HEADER_GET_TCODE(packet->header[0]);
1159         length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1160         payload = packet->payload;
1161         ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1162
1163         if (tcode == TCODE_LOCK_REQUEST &&
1164             ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1165                 lock_arg = be32_to_cpu(payload[0]);
1166                 lock_data = be32_to_cpu(payload[1]);
1167         } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1168                 lock_arg = 0;
1169                 lock_data = 0;
1170         } else {
1171                 fw_fill_response(&response, packet->header,
1172                                  RCODE_TYPE_ERROR, NULL, 0);
1173                 goto out;
1174         }
1175
1176         sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1177         reg_write(ohci, OHCI1394_CSRData, lock_data);
1178         reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1179         reg_write(ohci, OHCI1394_CSRControl, sel);
1180
1181         if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1182                 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
1183         else
1184                 fw_notify("swap not done yet\n");
1185
1186         fw_fill_response(&response, packet->header,
1187                          RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1188  out:
1189         fw_core_handle_response(&ohci->card, &response);
1190 }
1191
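/*
 * Complete a request which the local node addressed to itself entirely in
 * software: report the ack through the packet callback, hand config-ROM
 * reads and serial-bus lock requests to the local handlers above, and
 * pass everything else on to the core.
 */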
1192 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1193 {
1194         u64 offset;
1195         u32 csr;
1196
1197         if (ctx == &ctx->ohci->at_request_ctx) {
1198                 packet->ack = ACK_PENDING;
1199                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1200         }
1201
1202         offset =
1203                 ((unsigned long long)
1204                  HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1205                 packet->header[2];
1206         csr = offset - CSR_REGISTER_BASE;
1207
1208         /* Handle config rom reads. */
1209         if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1210                 handle_local_rom(ctx->ohci, packet, csr);
1211         else switch (csr) {
1212         case CSR_BUS_MANAGER_ID:
1213         case CSR_BANDWIDTH_AVAILABLE:
1214         case CSR_CHANNELS_AVAILABLE_HI:
1215         case CSR_CHANNELS_AVAILABLE_LO:
1216                 handle_local_lock(ctx->ohci, packet, csr);
1217                 break;
1218         default:
1219                 if (ctx == &ctx->ohci->at_request_ctx)
1220                         fw_core_handle_request(&ctx->ohci->card, packet);
1221                 else
1222                         fw_core_handle_response(&ctx->ohci->card, packet);
1223                 break;
1224         }
1225
1226         if (ctx == &ctx->ohci->at_response_ctx) {
1227                 packet->ack = ACK_COMPLETE;
1228                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1229         }
1230 }
1231
1232 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1233 {
1234         unsigned long flags;
1235         int ret;
1236
1237         spin_lock_irqsave(&ctx->ohci->lock, flags);
1238
1239         if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1240             ctx->ohci->generation == packet->generation) {
1241                 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1242                 handle_local_request(ctx, packet);
1243                 return;
1244         }
1245
1246         ret = at_context_queue_packet(ctx, packet);
1247         spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1248
1249         if (ret < 0)
1250                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1251
1252 }
1253
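/*
 * Scheduled from the selfIDComplete interrupt: pick up the new node ID
 * and the self-ID quadlets, check them for consistency against the bus
 * generation, stop the AT contexts, switch over to a queued config ROM if
 * there is one, and report the new topology to the core.
 */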
1254 static void bus_reset_tasklet(unsigned long data)
1255 {
1256         struct fw_ohci *ohci = (struct fw_ohci *)data;
1257         int self_id_count, i, j, reg;
1258         int generation, new_generation;
1259         unsigned long flags;
1260         void *free_rom = NULL;
1261         dma_addr_t free_rom_bus = 0;
1262
1263         reg = reg_read(ohci, OHCI1394_NodeID);
1264         if (!(reg & OHCI1394_NodeID_idValid)) {
1265                 fw_notify("node ID not valid, new bus reset in progress\n");
1266                 return;
1267         }
1268         if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1269                 fw_notify("misconfigured bus\n");
1270                 return;
1271         }
1272         ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1273                                OHCI1394_NodeID_nodeNumber);
1274
1275         reg = reg_read(ohci, OHCI1394_SelfIDCount);
1276         if (reg & OHCI1394_SelfIDCount_selfIDError) {
1277                 fw_notify("inconsistent self IDs\n");
1278                 return;
1279         }
1280         /*
1281          * The count in the SelfIDCount register is the number of
1282          * bytes in the self ID receive buffer.  Since we also receive
1283          * the inverted quadlets and a header quadlet, we shift one
1284          * bit extra to get the actual number of self IDs.
1285          */
1286         self_id_count = (reg >> 3) & 0xff;
1287         if (self_id_count == 0 || self_id_count > 252) {
1288                 fw_notify("inconsistent self IDs\n");
1289                 return;
1290         }
1291         generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1292         rmb();
1293
1294         for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1295                 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1296                         fw_notify("inconsistent self IDs\n");
1297                         return;
1298                 }
1299                 ohci->self_id_buffer[j] =
1300                                 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1301         }
1302         rmb();
1303
1304         /*
1305          * Check the consistency of the self IDs we just read.  The
1306          * problem we face is that a new bus reset can start while we
1307          * read out the self IDs from the DMA buffer. If this happens,
1308          * the DMA buffer will be overwritten with new self IDs and we
1309          * will read out inconsistent data.  The OHCI specification
1310          * (section 11.2) recommends a technique similar to
1311          * linux/seqlock.h, where we remember the generation of the
1312          * self IDs in the buffer before reading them out and compare
1313          * it to the current generation after reading them out.  If
1314          * the two generations match we know we have a consistent set
1315          * of self IDs.
1316          */
1317
1318         new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1319         if (new_generation != generation) {
1320                 fw_notify("recursive bus reset detected, "
1321                           "discarding self ids\n");
1322                 return;
1323         }
1324
1325         /* FIXME: Document how the locking works. */
1326         spin_lock_irqsave(&ohci->lock, flags);
1327
1328         ohci->generation = generation;
1329         context_stop(&ohci->at_request_ctx);
1330         context_stop(&ohci->at_response_ctx);
1331         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1332
1333         if (ohci->bus_reset_packet_quirk)
1334                 ohci->request_generation = generation;
1335
1336         /*
1337          * This next bit is unrelated to the AT context stuff but we
1338          * have to do it under the spinlock also.  If a new config rom
1339          * was set up before this reset, the old one is now no longer
1340          * in use and we can free it. Update the config rom pointers
1341          * to point to the current config rom and clear the
1342          * next_config_rom pointer so a new update can take place.
1343          */
1344
1345         if (ohci->next_config_rom != NULL) {
1346                 if (ohci->next_config_rom != ohci->config_rom) {
1347                         free_rom      = ohci->config_rom;
1348                         free_rom_bus  = ohci->config_rom_bus;
1349                 }
1350                 ohci->config_rom      = ohci->next_config_rom;
1351                 ohci->config_rom_bus  = ohci->next_config_rom_bus;
1352                 ohci->next_config_rom = NULL;
1353
1354                 /*
1355                  * Restore config_rom image and manually update
1356                  * config_rom registers.  Writing the header quadlet
1357                  * will indicate that the config rom is ready, so we
1358                  * do that last.
1359                  */
1360                 reg_write(ohci, OHCI1394_BusOptions,
1361                           be32_to_cpu(ohci->config_rom[2]));
1362                 ohci->config_rom[0] = ohci->next_header;
1363                 reg_write(ohci, OHCI1394_ConfigROMhdr,
1364                           be32_to_cpu(ohci->next_header));
1365         }
1366
1367 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1368         reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1369         reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1370 #endif
1371
1372         spin_unlock_irqrestore(&ohci->lock, flags);
1373
1374         if (free_rom)
1375                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1376                                   free_rom, free_rom_bus);
1377
1378         log_selfids(ohci->node_id, generation,
1379                     self_id_count, ohci->self_id_buffer);
1380
1381         fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1382                                  self_id_count, ohci->self_id_buffer);
1383 }
1384
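/*
 * Top-level interrupt handler: acknowledge IntEvent (busReset is cleared
 * later, in bus_reset_tasklet), schedule the tasklets of the AR, AT and
 * isochronous contexts and of the bus reset handler, and handle the
 * error and cycle-counter events.
 */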
1385 static irqreturn_t irq_handler(int irq, void *data)
1386 {
1387         struct fw_ohci *ohci = data;
1388         u32 event, iso_event, cycle_time;
1389         int i;
1390
1391         event = reg_read(ohci, OHCI1394_IntEventClear);
1392
1393         if (!event || !~event)
1394                 return IRQ_NONE;
1395
1396         /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
1397         reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1398         log_irqs(event);
1399
1400         if (event & OHCI1394_selfIDComplete)
1401                 tasklet_schedule(&ohci->bus_reset_tasklet);
1402
1403         if (event & OHCI1394_RQPkt)
1404                 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1405
1406         if (event & OHCI1394_RSPkt)
1407                 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1408
1409         if (event & OHCI1394_reqTxComplete)
1410                 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1411
1412         if (event & OHCI1394_respTxComplete)
1413                 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1414
1415         iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1416         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1417
1418         while (iso_event) {
1419                 i = ffs(iso_event) - 1;
1420                 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1421                 iso_event &= ~(1 << i);
1422         }
1423
1424         iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1425         reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1426
1427         while (iso_event) {
1428                 i = ffs(iso_event) - 1;
1429                 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1430                 iso_event &= ~(1 << i);
1431         }
1432
1433         if (unlikely(event & OHCI1394_regAccessFail))
1434                 fw_error("Register access failure - "
1435                          "please notify linux1394-devel@lists.sf.net\n");
1436
1437         if (unlikely(event & OHCI1394_postedWriteErr))
1438                 fw_error("PCI posted write error\n");
1439
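             /*
              * A cycleTooLong event causes the controller to clear the
              * cycleMaster bit in LinkControl, so cycle mastering has to be
              * switched back on explicitly here.
              */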
1440         if (unlikely(event & OHCI1394_cycleTooLong)) {
1441                 if (printk_ratelimit())
1442                         fw_notify("isochronous cycle too long\n");
1443                 reg_write(ohci, OHCI1394_LinkControlSet,
1444                           OHCI1394_LinkControl_cycleMaster);
1445         }
1446
1447         if (unlikely(event & OHCI1394_cycleInconsistent)) {
1448                 /*
1449                  * We need to clear this event bit in order to make
1450                  * cycleMatch isochronous I/O work.  In theory we should
1451                  * stop active cycleMatch iso contexts now and restart
1452                  * them at least two cycles later.  (FIXME?)
1453                  */
1454                 if (printk_ratelimit())
1455                         fw_notify("isochronous cycle inconsistent\n");
1456         }
1457
1458         if (event & OHCI1394_cycle64Seconds) {
1459                 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1460                 if ((cycle_time & 0x80000000) == 0)
1461                         atomic_inc(&ohci->bus_seconds);
1462         }
1463
1464         return IRQ_HANDLED;
1465 }
1466
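     /*
      * Set the softReset bit in HCControl and poll, for up to OHCI_LOOP_COUNT
      * milliseconds, until the controller clears it again to signal that the
      * reset has completed.
      */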
1467 static int software_reset(struct fw_ohci *ohci)
1468 {
1469         int i;
1470
1471         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1472
1473         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1474                 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1475                      OHCI1394_HCControl_softReset) == 0)
1476                         return 0;
1477                 msleep(1);
1478         }
1479
1480         return -EBUSY;
1481 }
1482
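     /*
      * Copy 'length' quadlets of config ROM data into the DMA buffer and
      * zero-fill the remainder of the CONFIG_ROM_SIZE bytes.
      */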
1483 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1484 {
1485         size_t size = length * 4;
1486
1487         memcpy(dest, src, size);
1488         if (size < CONFIG_ROM_SIZE)
1489                 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1490 }
1491
1492 static int ohci_enable(struct fw_card *card,
1493                        const __be32 *config_rom, size_t length)
1494 {
1495         struct fw_ohci *ohci = fw_ohci(card);
1496         struct pci_dev *dev = to_pci_dev(card->device);
1497         u32 lps;
1498         int i;
1499
1500         if (software_reset(ohci)) {
1501                 fw_error("Failed to reset ohci card.\n");
1502                 return -EBUSY;
1503         }
1504
1505         /*
1506          * Now enable LPS, which we need in order to start accessing
1507          * most of the registers.  In fact, on some cards (ALI M5251),
1508          * accessing registers in the SClk domain without LPS enabled
1509          * will lock up the machine.  Wait 50msec to make sure we have
1510          * full link enabled.  However, with some cards (well, at least
1511          * a JMicron PCIe card), we have to try again sometimes.
1512          */
1513         reg_write(ohci, OHCI1394_HCControlSet,
1514                   OHCI1394_HCControl_LPS |
1515                   OHCI1394_HCControl_postedWriteEnable);
1516         flush_writes(ohci);
1517
1518         for (lps = 0, i = 0; !lps && i < 3; i++) {
1519                 msleep(50);
1520                 lps = reg_read(ohci, OHCI1394_HCControlSet) &
1521                       OHCI1394_HCControl_LPS;
1522         }
1523
1524         if (!lps) {
1525                 fw_error("Failed to set Link Power Status\n");
1526                 return -EIO;
1527         }
1528
1529         reg_write(ohci, OHCI1394_HCControlClear,
1530                   OHCI1394_HCControl_noByteSwapData);
1531
1532         reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1533         reg_write(ohci, OHCI1394_LinkControlClear,
1534                   OHCI1394_LinkControl_rcvPhyPkt);
1535         reg_write(ohci, OHCI1394_LinkControlSet,
1536                   OHCI1394_LinkControl_rcvSelfID |
1537                   OHCI1394_LinkControl_cycleTimerEnable |
1538                   OHCI1394_LinkControl_cycleMaster);
1539
1540         reg_write(ohci, OHCI1394_ATRetries,
1541                   OHCI1394_MAX_AT_REQ_RETRIES |
1542                   (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1543                   (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1544
1545         ar_context_run(&ohci->ar_request_ctx);
1546         ar_context_run(&ohci->ar_response_ctx);
1547
1548         reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1549         reg_write(ohci, OHCI1394_IntEventClear, ~0);
1550         reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1551         reg_write(ohci, OHCI1394_IntMaskSet,
1552                   OHCI1394_selfIDComplete |
1553                   OHCI1394_RQPkt | OHCI1394_RSPkt |
1554                   OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1555                   OHCI1394_isochRx | OHCI1394_isochTx |
1556                   OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1557                   OHCI1394_cycleInconsistent |
1558                   OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1559                   OHCI1394_masterIntEnable);
1560         if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1561                 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1562
1563         /* Activate link_on bit and contender bit in our self ID packets. */
1564         if (ohci_update_phy_reg(card, 4, 0,
1565                                 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
1566                 return -EIO;
1567
1568         /*
1569          * When the link is not yet enabled, the atomic config rom
1570          * update mechanism described below in ohci_set_config_rom()
1571          * is not active.  We have to update ConfigRomHeader and
1572          * BusOptions manually, and the write to ConfigROMmap takes
1573          * effect immediately.  We tie this to the enabling of the
1574          * link, so we have a valid config rom before enabling - the
1575          * OHCI requires that ConfigROMhdr and BusOptions have valid
1576          * values before enabling.
1577          *
1578          * However, when the ConfigROMmap is written, some controllers
1579          * always read back quadlets 0 and 2 from the config rom to
1580          * the ConfigRomHeader and BusOptions registers on bus reset.
1581          * They shouldn't do that in this initial case where the link
1582          * isn't enabled.  This means we have to use the same
1583          * workaround here, setting the bus header to 0 and then write
1584          * the right values in the bus reset tasklet.
1585          */
1586
1587         if (config_rom) {
1588                 ohci->next_config_rom =
1589                         dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1590                                            &ohci->next_config_rom_bus,
1591                                            GFP_KERNEL);
1592                 if (ohci->next_config_rom == NULL)
1593                         return -ENOMEM;
1594
1595                 copy_config_rom(ohci->next_config_rom, config_rom, length);
1596         } else {
1597                 /*
1598                  * In the suspend case, config_rom is NULL, which
1599                  * means that we just reuse the old config rom.
1600                  */
1601                 ohci->next_config_rom = ohci->config_rom;
1602                 ohci->next_config_rom_bus = ohci->config_rom_bus;
1603         }
1604
1605         ohci->next_header = ohci->next_config_rom[0];
1606         ohci->next_config_rom[0] = 0;
1607         reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1608         reg_write(ohci, OHCI1394_BusOptions,
1609                   be32_to_cpu(ohci->next_config_rom[2]));
1610         reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1611
1612         reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1613
1614         if (request_irq(dev->irq, irq_handler,
1615                         IRQF_SHARED, ohci_driver_name, ohci)) {
1616                 fw_error("Failed to allocate shared interrupt %d.\n",
1617                          dev->irq);
1618                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1619                                   ohci->config_rom, ohci->config_rom_bus);
1620                 return -EIO;
1621         }
1622
1623         reg_write(ohci, OHCI1394_HCControlSet,
1624                   OHCI1394_HCControl_linkEnable |
1625                   OHCI1394_HCControl_BIBimageValid);
1626         flush_writes(ohci);
1627
1628         /*
1629          * We are ready to go, initiate bus reset to finish the
1630          * initialization.
1631          */
1632
1633         fw_core_initiate_bus_reset(&ohci->card, 1);
1634
1635         return 0;
1636 }
1637
1638 static int ohci_set_config_rom(struct fw_card *card,
1639                                const __be32 *config_rom, size_t length)
1640 {
1641         struct fw_ohci *ohci;
1642         unsigned long flags;
1643         int ret = -EBUSY;
1644         __be32 *next_config_rom;
1645         dma_addr_t uninitialized_var(next_config_rom_bus);
1646
1647         ohci = fw_ohci(card);
1648
1649         /*
1650          * When the OHCI controller is enabled, the config rom update
1651          * mechanism is a bit tricky, but easy enough to use.  See
1652          * section 5.5.6 in the OHCI specification.
1653          *
1654          * The OHCI controller caches the new config rom address in a
1655          * shadow register (ConfigROMmapNext) and needs a bus reset
1656          * for the changes to take place.  When the bus reset is
1657          * detected, the controller loads the new values for the
1658          * ConfigRomHeader and BusOptions registers from the specified
1659          * config rom and loads ConfigROMmap from the ConfigROMmapNext
1660          * shadow register. All automatically and atomically.
1661          *
1662          * Now, there's a twist to this story.  The automatic load of
1663          * ConfigRomHeader and BusOptions doesn't honor the
1664          * noByteSwapData bit, so with a be32 config rom, the
1665          * controller will load be32 values into these registers
1666          * during the atomic update, even on little endian
1667          * architectures.  The workaround we use is to put a 0 in the
1668          * header quadlet; 0 is endian agnostic and means that the
1669          * config rom isn't ready yet.  In the bus reset tasklet we
1670          * then set up the real values for the two registers.
1671          *
1672          * We use ohci->lock to avoid racing with the code that sets
1673          * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1674          */
1675
1676         next_config_rom =
1677                 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1678                                    &next_config_rom_bus, GFP_KERNEL);
1679         if (next_config_rom == NULL)
1680                 return -ENOMEM;
1681
1682         spin_lock_irqsave(&ohci->lock, flags);
1683
1684         if (ohci->next_config_rom == NULL) {
1685                 ohci->next_config_rom = next_config_rom;
1686                 ohci->next_config_rom_bus = next_config_rom_bus;
1687
1688                 copy_config_rom(ohci->next_config_rom, config_rom, length);
1689
1690                 ohci->next_header = config_rom[0];
1691                 ohci->next_config_rom[0] = 0;
1692
1693                 reg_write(ohci, OHCI1394_ConfigROMmap,
1694                           ohci->next_config_rom_bus);
1695                 ret = 0;
1696         }
1697
1698         spin_unlock_irqrestore(&ohci->lock, flags);
1699
1700         /*
1701          * Now initiate a bus reset to have the changes take
1702          * effect. We clean up the old config rom memory and DMA
1703          * mappings in the bus reset tasklet, since the OHCI
1704          * controller could need to access it before the bus reset
1705          * takes effect.
1706          */
1707         if (ret == 0)
1708                 fw_core_initiate_bus_reset(&ohci->card, 1);
1709         else
1710                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1711                                   next_config_rom, next_config_rom_bus);
1712
1713         return ret;
1714 }
1715
1716 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1717 {
1718         struct fw_ohci *ohci = fw_ohci(card);
1719
1720         at_context_transmit(&ohci->at_request_ctx, packet);
1721 }
1722
1723 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1724 {
1725         struct fw_ohci *ohci = fw_ohci(card);
1726
1727         at_context_transmit(&ohci->at_response_ctx, packet);
1728 }
1729
1730 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1731 {
1732         struct fw_ohci *ohci = fw_ohci(card);
1733         struct context *ctx = &ohci->at_request_ctx;
1734         struct driver_data *driver_data = packet->driver_data;
1735         int ret = -ENOENT;
1736
1737         tasklet_disable(&ctx->tasklet);
1738
1739         if (packet->ack != 0)
1740                 goto out;
1741
1742         if (packet->payload_mapped)
1743                 dma_unmap_single(ohci->card.device, packet->payload_bus,
1744                                  packet->payload_length, DMA_TO_DEVICE);
1745
1746         log_ar_at_event('T', packet->speed, packet->header, 0x20);
1747         driver_data->packet = NULL;
1748         packet->ack = RCODE_CANCELLED;
1749         packet->callback(packet, &ohci->card, packet->ack);
1750         ret = 0;
1751  out:
1752         tasklet_enable(&ctx->tasklet);
1753
1754         return ret;
1755 }
1756
1757 static int ohci_enable_phys_dma(struct fw_card *card,
1758                                 int node_id, int generation)
1759 {
1760 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1761         return 0;
1762 #else
1763         struct fw_ohci *ohci = fw_ohci(card);
1764         unsigned long flags;
1765         int n, ret = 0;
1766
1767         /*
1768          * FIXME:  Make sure this bitmask is cleared when we clear the busReset
1769          * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
1770          */
1771
1772         spin_lock_irqsave(&ohci->lock, flags);
1773
1774         if (ohci->generation != generation) {
1775                 ret = -ESTALE;
1776                 goto out;
1777         }
1778
1779         /*
1780          * Note, if the node ID contains a non-local bus ID, physical DMA is
1781          * enabled for _all_ nodes on remote buses.
1782          */
1783
1784         n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1785         if (n < 32)
1786                 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1787         else
1788                 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1789
1790         flush_writes(ohci);
1791  out:
1792         spin_unlock_irqrestore(&ohci->lock, flags);
1793
1794         return ret;
1795 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1796 }
1797
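     /*
      * Convert an IsochronousCycleTimer value into 24.576 MHz ticks:
      * bits 11-0 are cycleOffset (one tick each), bits 24-12 are cycleCount
      * (3072 ticks per 125 us cycle) and bits 31-25 are cycleSeconds
      * (8000 cycles per second).
      */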
1798 static inline u32 cycle_timer_ticks(u32 cycle_timer)
1799 {
1800         u32 ticks;
1801
1802         ticks = cycle_timer & 0xfff;
1803         ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1804         ticks += (3072 * 8000) * (cycle_timer >> 25);
1805         return ticks;
1806 }
1807
1808 static u64 ohci_get_bus_time(struct fw_card *card)
1809 {
1810         struct fw_ohci *ohci = fw_ohci(card);
1811         u32 c0, c1, c2;
1812         u32 t0, t1, t2;
1813         s32 diff01, diff12;
1814         u64 bus_time;
1815
1816         if (!ohci->iso_cycle_timer_quirk) {
1817                 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1818         } else {
1819                 /*
1820                  * Some controllers exhibit one or more of the following bugs
1821                  * when updating the iso cycle timer register:
1822                  *  - When the lowest six bits are wrapping around to zero,
1823                  *    a read that happens at the same time will return garbage
1824                  *    in the lowest ten bits.
1825                  *  - When the cycleOffset field wraps around to zero, the
1826                  *    cycleCount field is not incremented for about 60 ns.
1827                  *  - Occasionally, the entire register reads zero.
1828                  *
1829                  * To catch these, we read the register three times and ensure
1830                  * that the difference between each two consecutive reads is
1831                  * approximately the same, i.e., less than twice the other.
1832                  * Furthermore, any negative difference indicates an error.
1833                  * (A PCI read should take at least 20 ticks of the 24.576 MHz
1834                  * timer to execute, so we have enough precision to compute the
1835                  * ratio of the differences.)
1836                  */
1837                 do {
1838                         c0 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1839                         c1 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1840                         c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1841                         t0 = cycle_timer_ticks(c0);
1842                         t1 = cycle_timer_ticks(c1);
1843                         t2 = cycle_timer_ticks(c2);
1844                         diff01 = t1 - t0;
1845                         diff12 = t2 - t1;
1846                 } while (diff01 <= 0 || diff12 <= 0 ||
1847                          diff01 / diff12 >= 2 || diff12 / diff01 >= 2);
1848         }
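             /*
              * Combine the seconds accumulated from cycle64Seconds interrupts
              * (upper 32 bits) with the raw cycle timer value (lower 32 bits).
              */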
1849         bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | c2;
1850
1851         return bus_time;
1852 }
1853
1854 static void copy_iso_headers(struct iso_context *ctx, void *p)
1855 {
1856         int i = ctx->header_length;
1857
1858         if (i + ctx->base.header_size > PAGE_SIZE)
1859                 return;
1860
1861         /*
1862          * The iso header is byteswapped to little endian by
1863          * the controller, but the remaining header quadlets
1864          * are big endian.  We want to present all the headers
1865          * as big endian, so we have to swap the first quadlet.
1866          */
1867         if (ctx->base.header_size > 0)
1868                 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1869         if (ctx->base.header_size > 4)
1870                 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1871         if (ctx->base.header_size > 8)
1872                 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1873         ctx->header_length += ctx->base.header_size;
1874 }
1875
1876 static int handle_ir_dualbuffer_packet(struct context *context,
1877                                        struct descriptor *d,
1878                                        struct descriptor *last)
1879 {
1880         struct iso_context *ctx =
1881                 container_of(context, struct iso_context, context);
1882         struct db_descriptor *db = (struct db_descriptor *) d;
1883         __le32 *ir_header;
1884         size_t header_length;
1885         void *p, *end;
1886
1887         if (db->first_res_count != 0 && db->second_res_count != 0) {
1888                 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1889                         /* This descriptor isn't done yet, stop iteration. */
1890                         return 0;
1891                 }
1892                 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1893         }
1894
1895         header_length = le16_to_cpu(db->first_req_count) -
1896                 le16_to_cpu(db->first_res_count);
1897
1898         p = db + 1;
1899         end = p + header_length;
1900         while (p < end) {
1901                 copy_iso_headers(ctx, p);
1902                 ctx->excess_bytes +=
1903                         (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1904                 p += max(ctx->base.header_size, (size_t)8);
1905         }
1906
1907         ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1908                 le16_to_cpu(db->second_res_count);
1909
1910         if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1911                 ir_header = (__le32 *) (db + 1);
1912                 ctx->base.callback(&ctx->base,
1913                                    le32_to_cpu(ir_header[0]) & 0xffff,
1914                                    ctx->header_length, ctx->header,
1915                                    ctx->base.callback_data);
1916                 ctx->header_length = 0;
1917         }
1918
1919         return 1;
1920 }
1921
1922 static int handle_ir_packet_per_buffer(struct context *context,
1923                                        struct descriptor *d,
1924                                        struct descriptor *last)
1925 {
1926         struct iso_context *ctx =
1927                 container_of(context, struct iso_context, context);
1928         struct descriptor *pd;
1929         __le32 *ir_header;
1930         void *p;
1931
1932         for (pd = d; pd <= last; pd++) {
1933                 if (pd->transfer_status)
1934                         break;
1935         }
1936         if (pd > last)
1937                 /* Descriptor(s) not done yet, stop iteration */
1938                 return 0;
1939
1940         p = last + 1;
1941         copy_iso_headers(ctx, p);
1942
1943         if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1944                 ir_header = (__le32 *) p;
1945                 ctx->base.callback(&ctx->base,
1946                                    le32_to_cpu(ir_header[0]) & 0xffff,
1947                                    ctx->header_length, ctx->header,
1948                                    ctx->base.callback_data);
1949                 ctx->header_length = 0;
1950         }
1951
1952         return 1;
1953 }
1954
1955 static int handle_it_packet(struct context *context,
1956                             struct descriptor *d,
1957                             struct descriptor *last)
1958 {
1959         struct iso_context *ctx =
1960                 container_of(context, struct iso_context, context);
1961         int i;
1962         struct descriptor *pd;
1963
1964         for (pd = d; pd <= last; pd++)
1965                 if (pd->transfer_status)
1966                         break;
1967         if (pd > last)
1968                 /* Descriptor(s) not done yet, stop iteration */
1969                 return 0;
1970
1971         i = ctx->header_length;
1972         if (i + 4 < PAGE_SIZE) {
1973                 /* Present this value as big-endian to match the receive code */
1974                 *(__be32 *)(ctx->header + i) = cpu_to_be32(
1975                                 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
1976                                 le16_to_cpu(pd->res_count));
1977                 ctx->header_length += 4;
1978         }
1979         if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1980                 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1981                                    ctx->header_length, ctx->header,
1982                                    ctx->base.callback_data);
1983                 ctx->header_length = 0;
1984         }
1985         return 1;
1986 }
1987
1988 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1989                                 int type, int channel, size_t header_size)
1990 {
1991         struct fw_ohci *ohci = fw_ohci(card);
1992         struct iso_context *ctx, *list;
1993         descriptor_callback_t callback;
1994         u64 *channels, dont_care = ~0ULL;
1995         u32 *mask, regs;
1996         unsigned long flags;
1997         int index, ret = -ENOMEM;
1998
1999         if (type == FW_ISO_CONTEXT_TRANSMIT) {
2000                 channels = &dont_care;
2001                 mask = &ohci->it_context_mask;
2002                 list = ohci->it_context_list;
2003                 callback = handle_it_packet;
2004         } else {
2005                 channels = &ohci->ir_context_channels;
2006                 mask = &ohci->ir_context_mask;
2007                 list = ohci->ir_context_list;
2008                 if (ohci->use_dualbuffer)
2009                         callback = handle_ir_dualbuffer_packet;
2010                 else
2011                         callback = handle_ir_packet_per_buffer;
2012         }
2013
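             /*
              * Under ohci->lock, claim a free context slot from *mask and, for
              * receive contexts, the requested channel from *channels (transmit
              * contexts use the dont_care mask, so any channel passes).
              */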
2014         spin_lock_irqsave(&ohci->lock, flags);
2015         index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2016         if (index >= 0) {
2017                 *channels &= ~(1ULL << channel);
2018                 *mask &= ~(1 << index);
2019         }
2020         spin_unlock_irqrestore(&ohci->lock, flags);
2021
2022         if (index < 0)
2023                 return ERR_PTR(-EBUSY);
2024
2025         if (type == FW_ISO_CONTEXT_TRANSMIT)
2026                 regs = OHCI1394_IsoXmitContextBase(index);
2027         else
2028                 regs = OHCI1394_IsoRcvContextBase(index);
2029
2030         ctx = &list[index];
2031         memset(ctx, 0, sizeof(*ctx));
2032         ctx->header_length = 0;
2033         ctx->header = (void *) __get_free_page(GFP_KERNEL);
2034         if (ctx->header == NULL)
2035                 goto out;
2036
2037         ret = context_init(&ctx->context, ohci, regs, callback);
2038         if (ret < 0)
2039                 goto out_with_header;
2040
2041         return &ctx->base;
2042
2043  out_with_header:
2044         free_page((unsigned long)ctx->header);
2045  out:
2046         spin_lock_irqsave(&ohci->lock, flags);
2047         *mask |= 1 << index;
2048         spin_unlock_irqrestore(&ohci->lock, flags);
2049
2050         return ERR_PTR(ret);
2051 }
2052
2053 static int ohci_start_iso(struct fw_iso_context *base,
2054                           s32 cycle, u32 sync, u32 tags)
2055 {
2056         struct iso_context *ctx = container_of(base, struct iso_context, base);
2057         struct fw_ohci *ohci = ctx->context.ohci;
2058         u32 control, match;
2059         int index;
2060
2061         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2062                 index = ctx - ohci->it_context_list;
2063                 match = 0;
2064                 if (cycle >= 0)
2065                         match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
2066                                 (cycle & 0x7fff) << 16;
2067
2068                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
2069                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
2070                 context_run(&ctx->context, match);
2071         } else {
2072                 index = ctx - ohci->ir_context_list;
2073                 control = IR_CONTEXT_ISOCH_HEADER;
2074                 if (ohci->use_dualbuffer)
2075                         control |= IR_CONTEXT_DUAL_BUFFER_MODE;
2076                 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2077                 if (cycle >= 0) {
2078                         match |= (cycle & 0x07fff) << 12;
2079                         control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
2080                 }
2081
2082                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
2083                 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
2084                 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
2085                 context_run(&ctx->context, control);
2086         }
2087
2088         return 0;
2089 }
2090
2091 static int ohci_stop_iso(struct fw_iso_context *base)
2092 {
2093         struct fw_ohci *ohci = fw_ohci(base->card);
2094         struct iso_context *ctx = container_of(base, struct iso_context, base);
2095         int index;
2096
2097         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2098                 index = ctx - ohci->it_context_list;
2099                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
2100         } else {
2101                 index = ctx - ohci->ir_context_list;
2102                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
2103         }
2104         flush_writes(ohci);
2105         context_stop(&ctx->context);
2106
2107         return 0;
2108 }
2109
2110 static void ohci_free_iso_context(struct fw_iso_context *base)
2111 {
2112         struct fw_ohci *ohci = fw_ohci(base->card);
2113         struct iso_context *ctx = container_of(base, struct iso_context, base);
2114         unsigned long flags;
2115         int index;
2116
2117         ohci_stop_iso(base);
2118         context_release(&ctx->context);
2119         free_page((unsigned long)ctx->header);
2120
2121         spin_lock_irqsave(&ohci->lock, flags);
2122
2123         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2124                 index = ctx - ohci->it_context_list;
2125                 ohci->it_context_mask |= 1 << index;
2126         } else {
2127                 index = ctx - ohci->ir_context_list;
2128                 ohci->ir_context_mask |= 1 << index;
2129                 ohci->ir_context_channels |= 1ULL << base->channel;
2130         }
2131
2132         spin_unlock_irqrestore(&ohci->lock, flags);
2133 }
2134
2135 static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2136                                    struct fw_iso_packet *packet,
2137                                    struct fw_iso_buffer *buffer,
2138                                    unsigned long payload)
2139 {
2140         struct iso_context *ctx = container_of(base, struct iso_context, base);
2141         struct descriptor *d, *last, *pd;
2142         struct fw_iso_packet *p;
2143         __le32 *header;
2144         dma_addr_t d_bus, page_bus;
2145         u32 z, header_z, payload_z, irq;
2146         u32 payload_index, payload_end_index, next_page_index;
2147         int page, end_page, i, length, offset;
2148
2149         /*
2150          * FIXME: Cycle lost behavior should be configurable: lose
2151          * packet, retransmit or terminate.
2152          */
2153
2154         p = packet;
2155         payload_index = payload;
2156
2157         if (p->skip)
2158                 z = 1;
2159         else
2160                 z = 2;
2161         if (p->header_length > 0)
2162                 z++;
2163
2164         /* Determine the first page the payload isn't contained in. */
2165         end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
2166         if (p->payload_length > 0)
2167                 payload_z = end_page - (payload_index >> PAGE_SHIFT);
2168         else
2169                 payload_z = 0;
2170
2171         z += payload_z;
2172
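             /*
              * The program now needs z descriptors: the immediate-key descriptor
              * pair carrying the two-quadlet iso header (unless this is a skip
              * packet), an optional descriptor for extra user header data, and
              * one descriptor per page spanned by the payload.  The user header
              * data itself is stored in the header_z slots that follow.
              */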
2173         /* Get header size in number of descriptors. */
2174         header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
2175
2176         d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
2177         if (d == NULL)
2178                 return -ENOMEM;
2179
2180         if (!p->skip) {
2181                 d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
2182                 d[0].req_count = cpu_to_le16(8);
2183
2184                 header = (__le32 *) &d[1];
2185                 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
2186                                         IT_HEADER_TAG(p->tag) |
2187                                         IT_HEADER_TCODE(TCODE_STREAM_DATA) |
2188                                         IT_HEADER_CHANNEL(ctx->base.channel) |
2189                                         IT_HEADER_SPEED(ctx->base.speed));
2190                 header[1] =
2191                         cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
2192                                                           p->payload_length));
2193         }
2194
2195         if (p->header_length > 0) {
2196                 d[2].req_count    = cpu_to_le16(p->header_length);
2197                 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
2198                 memcpy(&d[z], p->header, p->header_length);
2199         }
2200
2201         pd = d + z - payload_z;
2202         payload_end_index = payload_index + p->payload_length;
2203         for (i = 0; i < payload_z; i++) {
2204                 page               = payload_index >> PAGE_SHIFT;
2205                 offset             = payload_index & ~PAGE_MASK;
2206                 next_page_index    = (page + 1) << PAGE_SHIFT;
2207                 length             =
2208                         min(next_page_index, payload_end_index) - payload_index;
2209                 pd[i].req_count    = cpu_to_le16(length);
2210
2211                 page_bus = page_private(buffer->pages[page]);
2212                 pd[i].data_address = cpu_to_le32(page_bus + offset);
2213
2214                 payload_index += length;
2215         }
2216
2217         if (p->interrupt)
2218                 irq = DESCRIPTOR_IRQ_ALWAYS;
2219         else
2220                 irq = DESCRIPTOR_NO_IRQ;
2221
2222         last = z == 2 ? d : d + z - 1;
2223         last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
2224                                      DESCRIPTOR_STATUS |
2225                                      DESCRIPTOR_BRANCH_ALWAYS |
2226                                      irq);
2227
2228         context_append(&ctx->context, d, z, header_z);
2229
2230         return 0;
2231 }
2232
2233 static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2234                                              struct fw_iso_packet *packet,
2235                                              struct fw_iso_buffer *buffer,
2236                                              unsigned long payload)
2237 {
2238         struct iso_context *ctx = container_of(base, struct iso_context, base);
2239         struct db_descriptor *db = NULL;
2240         struct descriptor *d;
2241         struct fw_iso_packet *p;
2242         dma_addr_t d_bus, page_bus;
2243         u32 z, header_z, length, rest;
2244         int page, offset, packet_count, header_size;
2245
2246         /*
2247          * FIXME: Cycle lost behavior should be configurable: lose
2248          * packet, retransmit or terminate.
2249          */
2250
2251         p = packet;
2252         z = 2;
2253
2254         /*
2255          * The OHCI controller puts the isochronous header and trailer in the
2256          * buffer, so we need at least 8 bytes.
2257          */
2258         packet_count = p->header_length / ctx->base.header_size;
2259         header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2260
2261         /* Get header size in number of descriptors. */
2262         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2263         page     = payload >> PAGE_SHIFT;
2264         offset   = payload & ~PAGE_MASK;
2265         rest     = p->payload_length;
2266         /*
2267          * The controllers I've tested have not worked correctly when
2268          * second_req_count is zero.  Rather than do something we know won't
2269          * work, return an error.
2270          */
2271         if (rest == 0)
2272                 return -EINVAL;
2273
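             /*
              * Each dual-buffer descriptor receives the per-packet headers in
              * its first buffer (placed directly behind the descriptor) and up
              * to one page of payload in its second buffer, taken from the
              * client's iso buffer pages.
              */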
2274         while (rest > 0) {
2275                 d = context_get_descriptors(&ctx->context,
2276                                             z + header_z, &d_bus);
2277                 if (d == NULL)
2278                         return -ENOMEM;
2279
2280                 db = (struct db_descriptor *) d;
2281                 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2282                                           DESCRIPTOR_BRANCH_ALWAYS);
2283                 db->first_size =
2284                     cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2285                 if (p->skip && rest == p->payload_length) {
2286                         db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2287                         db->first_req_count = db->first_size;
2288                 } else {
2289                         db->first_req_count = cpu_to_le16(header_size);
2290                 }
2291                 db->first_res_count = db->first_req_count;
2292                 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
2293
2294                 if (p->skip && rest == p->payload_length)
2295                         length = 4;
2296                 else if (offset + rest < PAGE_SIZE)
2297                         length = rest;
2298                 else
2299                         length = PAGE_SIZE - offset;
2300
2301                 db->second_req_count = cpu_to_le16(length);
2302                 db->second_res_count = db->second_req_count;
2303                 page_bus = page_private(buffer->pages[page]);
2304                 db->second_buffer = cpu_to_le32(page_bus + offset);
2305
2306                 if (p->interrupt && length == rest)
2307                         db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2308
2309                 context_append(&ctx->context, d, z, header_z);
2310                 offset = (offset + length) & ~PAGE_MASK;
2311                 rest -= length;
2312                 if (offset == 0)
2313                         page++;
2314         }
2315
2316         return 0;
2317 }
2318
2319 static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2320                                         struct fw_iso_packet *packet,
2321                                         struct fw_iso_buffer *buffer,
2322                                         unsigned long payload)
2323 {
2324         struct iso_context *ctx = container_of(base, struct iso_context, base);
2325         struct descriptor *d, *pd;
2326         struct fw_iso_packet *p = packet;
2327         dma_addr_t d_bus, page_bus;
2328         u32 z, header_z, rest;
2329         int i, j, length;
2330         int page, offset, packet_count, header_size, payload_per_buffer;
2331
2332         /*
2333          * The OHCI controller puts the isochronous header and trailer in the
2334          * buffer, so we need at least 8 bytes.
2335          */
2336         packet_count = p->header_length / ctx->base.header_size;
2337         header_size  = max(ctx->base.header_size, (size_t)8);
2338
2339         /* Get header size in number of descriptors. */
2340         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2341         page     = payload >> PAGE_SHIFT;
2342         offset   = payload & ~PAGE_MASK;
2343         payload_per_buffer = p->payload_length / packet_count;
2344
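             /*
              * For each packet, build a chain of INPUT_MORE descriptors: one
              * for the (at least 8 byte) header, then one per page of the
              * per-packet payload.  The last descriptor of the chain is
              * rewritten below as INPUT_LAST with branching so the context
              * can advance.
              */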
2345         for (i = 0; i < packet_count; i++) {
2346                 /* d points to the header descriptor */
2347                 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
2348                 d = context_get_descriptors(&ctx->context,
2349                                 z + header_z, &d_bus);
2350                 if (d == NULL)
2351                         return -ENOMEM;
2352
2353                 d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
2354                                               DESCRIPTOR_INPUT_MORE);
2355                 if (p->skip && i == 0)
2356                         d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2357                 d->req_count    = cpu_to_le16(header_size);
2358                 d->res_count    = d->req_count;
2359                 d->transfer_status = 0;
2360                 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
2361
2362                 rest = payload_per_buffer;
2363                 pd = d;
2364                 for (j = 1; j < z; j++) {
2365                         pd++;
2366                         pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
2367                                                   DESCRIPTOR_INPUT_MORE);
2368
2369                         if (offset + rest < PAGE_SIZE)
2370                                 length = rest;
2371                         else
2372                                 length = PAGE_SIZE - offset;
2373                         pd->req_count = cpu_to_le16(length);
2374                         pd->res_count = pd->req_count;
2375                         pd->transfer_status = 0;
2376
2377                         page_bus = page_private(buffer->pages[page]);
2378                         pd->data_address = cpu_to_le32(page_bus + offset);
2379
2380                         offset = (offset + length) & ~PAGE_MASK;
2381                         rest -= length;
2382                         if (offset == 0)
2383                                 page++;
2384                 }
2385                 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
2386                                           DESCRIPTOR_INPUT_LAST |
2387                                           DESCRIPTOR_BRANCH_ALWAYS);
2388                 if (p->interrupt && i == packet_count - 1)
2389                         pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2390
2391                 context_append(&ctx->context, d, z, header_z);
2392         }
2393
2394         return 0;
2395 }
2396
2397 static int ohci_queue_iso(struct fw_iso_context *base,
2398                           struct fw_iso_packet *packet,
2399                           struct fw_iso_buffer *buffer,
2400                           unsigned long payload)
2401 {
2402         struct iso_context *ctx = container_of(base, struct iso_context, base);
2403         unsigned long flags;
2404         int ret;
2405
2406         spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2407         if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2408                 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2409         else if (ctx->context.ohci->use_dualbuffer)
2410                 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2411                                                         buffer, payload);
2412         else
2413                 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2414                                                         buffer, payload);
2415         spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
2416
2417         return ret;
2418 }
2419
2420 static const struct fw_card_driver ohci_driver = {
2421         .enable                 = ohci_enable,
2422         .update_phy_reg         = ohci_update_phy_reg,
2423         .set_config_rom         = ohci_set_config_rom,
2424         .send_request           = ohci_send_request,
2425         .send_response          = ohci_send_response,
2426         .cancel_packet          = ohci_cancel_packet,
2427         .enable_phys_dma        = ohci_enable_phys_dma,
2428         .get_bus_time           = ohci_get_bus_time,
2429
2430         .allocate_iso_context   = ohci_allocate_iso_context,
2431         .free_iso_context       = ohci_free_iso_context,
2432         .queue_iso              = ohci_queue_iso,
2433         .start_iso              = ohci_start_iso,
2434         .stop_iso               = ohci_stop_iso,
2435 };
2436
2437 #ifdef CONFIG_PPC_PMAC
2438 static void ohci_pmac_on(struct pci_dev *dev)
2439 {
2440         if (machine_is(powermac)) {
2441                 struct device_node *ofn = pci_device_to_OF_node(dev);
2442
2443                 if (ofn) {
2444                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
2445                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2446                 }
2447         }
2448 }
2449
2450 static void ohci_pmac_off(struct pci_dev *dev)
2451 {
2452         if (machine_is(powermac)) {
2453                 struct device_node *ofn = pci_device_to_OF_node(dev);
2454
2455                 if (ofn) {
2456                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2457                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
2458                 }
2459         }
2460 }
2461 #else
2462 #define ohci_pmac_on(dev)
2463 #define ohci_pmac_off(dev)
2464 #endif /* CONFIG_PPC_PMAC */
2465
2466 #define PCI_VENDOR_ID_AGERE             PCI_VENDOR_ID_ATT
2467 #define PCI_DEVICE_ID_AGERE_FW643       0x5901
2468
2469 static int __devinit pci_probe(struct pci_dev *dev,
2470                                const struct pci_device_id *ent)
2471 {
2472         struct fw_ohci *ohci;
2473         u32 bus_options, max_receive, link_speed, version;
2474         u64 guid;
2475         int err;
2476         size_t size;
2477
2478         ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
2479         if (ohci == NULL) {
2480                 err = -ENOMEM;
2481                 goto fail;
2482         }
2483
2484         fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2485
2486         ohci_pmac_on(dev);
2487
2488         err = pci_enable_device(dev);
2489         if (err) {
2490                 fw_error("Failed to enable OHCI hardware\n");
2491                 goto fail_free;
2492         }
2493
2494         pci_set_master(dev);
2495         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
2496         pci_set_drvdata(dev, ohci);
2497
2498         spin_lock_init(&ohci->lock);
2499
2500         tasklet_init(&ohci->bus_reset_tasklet,
2501                      bus_reset_tasklet, (unsigned long)ohci);
2502
2503         err = pci_request_region(dev, 0, ohci_driver_name);
2504         if (err) {
2505                 fw_error("MMIO resource unavailable\n");
2506                 goto fail_disable;
2507         }
2508
2509         ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
2510         if (ohci->registers == NULL) {
2511                 fw_error("Failed to remap registers\n");
2512                 err = -ENXIO;
2513                 goto fail_iomem;
2514         }
2515
2516         version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2517 #if 0
2518         /* FIXME: make it a context option or remove dual-buffer mode */
2519         ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
2520 #endif
2521
2522         /* dual-buffer mode is broken if more than one IR context is active */
2523         if (dev->vendor == PCI_VENDOR_ID_AGERE &&
2524             dev->device == PCI_DEVICE_ID_AGERE_FW643)
2525                 ohci->use_dualbuffer = false;
2526
2527         /* dual-buffer mode is broken */
2528         if (dev->vendor == PCI_VENDOR_ID_RICOH &&
2529             dev->device == PCI_DEVICE_ID_RICOH_R5C832)
2530                 ohci->use_dualbuffer = false;
2531
2532 /* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2533 #if !defined(CONFIG_X86_32)
2534         /* dual-buffer mode is broken with descriptor addresses above 2G */
2535         if (dev->vendor == PCI_VENDOR_ID_TI &&
2536             dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
2537                 ohci->use_dualbuffer = false;
2538 #endif
2539
2540 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2541         ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2542                              dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2543 #endif
2544         ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2545
2546         ohci->iso_cycle_timer_quirk = dev->vendor == PCI_VENDOR_ID_AL   ||
2547                                       dev->vendor == PCI_VENDOR_ID_NEC  ||
2548                                       dev->vendor == PCI_VENDOR_ID_VIA;
2549
2550         ar_context_init(&ohci->ar_request_ctx, ohci,
2551                         OHCI1394_AsReqRcvContextControlSet);
2552
2553         ar_context_init(&ohci->ar_response_ctx, ohci,
2554                         OHCI1394_AsRspRcvContextControlSet);
2555
2556         context_init(&ohci->at_request_ctx, ohci,
2557                      OHCI1394_AsReqTrContextControlSet, handle_at_packet);
2558
2559         context_init(&ohci->at_response_ctx, ohci,
2560                      OHCI1394_AsRspTrContextControlSet, handle_at_packet);
2561
2562         reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
2563         ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
2564         reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
2565         size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
2566         ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2567
2568         reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2569         ohci->ir_context_channels = ~0ULL;
2570         ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2571         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2572         size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
2573         ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
2574
2575         if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
2576                 err = -ENOMEM;
2577                 goto fail_contexts;
2578         }
2579
2580         /* self-id dma buffer allocation */
2581         ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
2582                                                SELF_ID_BUF_SIZE,
2583                                                &ohci->self_id_bus,
2584                                                GFP_KERNEL);
2585         if (ohci->self_id_cpu == NULL) {
2586                 err = -ENOMEM;
2587                 goto fail_contexts;
2588         }
2589
2590         bus_options = reg_read(ohci, OHCI1394_BusOptions);
2591         max_receive = (bus_options >> 12) & 0xf;
2592         link_speed = bus_options & 0x7;
2593         guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
2594                 reg_read(ohci, OHCI1394_GUIDLo);
2595
2596         err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
2597         if (err)
2598                 goto fail_self_id;
2599
2600         fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2601                   dev_name(&dev->dev), version >> 16, version & 0xff);
2602
2603         return 0;
2604
2605  fail_self_id:
2606         dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2607                           ohci->self_id_cpu, ohci->self_id_bus);
2608  fail_contexts:
2609         kfree(ohci->ir_context_list);
2610         kfree(ohci->it_context_list);
2611         context_release(&ohci->at_response_ctx);
2612         context_release(&ohci->at_request_ctx);
2613         ar_context_release(&ohci->ar_response_ctx);
2614         ar_context_release(&ohci->ar_request_ctx);
2615         pci_iounmap(dev, ohci->registers);
2616  fail_iomem:
2617         pci_release_region(dev, 0);
2618  fail_disable:
2619         pci_disable_device(dev);
2620  fail_free:
2621         kfree(&ohci->card);
2622         ohci_pmac_off(dev);
2623  fail:
2624         if (err == -ENOMEM)
2625                 fw_error("Out of memory\n");
2626
2627         return err;
2628 }
2629
2630 static void pci_remove(struct pci_dev *dev)
2631 {
2632         struct fw_ohci *ohci;
2633
2634         ohci = pci_get_drvdata(dev);
2635         reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2636         flush_writes(ohci);
2637         fw_core_remove_card(&ohci->card);
2638
2639         /*
2640          * FIXME: Fail all pending packets here, now that the upper
2641          * layers can't queue any more.
2642          */
2643
2644         software_reset(ohci);
2645         free_irq(dev->irq, ohci);
2646
2647         if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
2648                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2649                                   ohci->next_config_rom, ohci->next_config_rom_bus);
2650         if (ohci->config_rom)
2651                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2652                                   ohci->config_rom, ohci->config_rom_bus);
2653         dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2654                           ohci->self_id_cpu, ohci->self_id_bus);
2655         ar_context_release(&ohci->ar_request_ctx);
2656         ar_context_release(&ohci->ar_response_ctx);
2657         context_release(&ohci->at_request_ctx);
2658         context_release(&ohci->at_response_ctx);
2659         kfree(ohci->it_context_list);
2660         kfree(ohci->ir_context_list);
2661         pci_iounmap(dev, ohci->registers);
2662         pci_release_region(dev, 0);
2663         pci_disable_device(dev);
2664         kfree(&ohci->card);
2665         ohci_pmac_off(dev);
2666
2667         fw_notify("Removed fw-ohci device.\n");
2668 }
2669
2670 #ifdef CONFIG_PM
2671 static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2672 {
2673         struct fw_ohci *ohci = pci_get_drvdata(dev);
2674         int err;
2675
2676         software_reset(ohci);
2677         free_irq(dev->irq, ohci);
2678         err = pci_save_state(dev);
2679         if (err) {
2680                 fw_error("pci_save_state failed\n");
2681                 return err;
2682         }
2683         err = pci_set_power_state(dev, pci_choose_state(dev, state));
2684         if (err)
2685                 fw_error("pci_set_power_state failed with %d\n", err);
2686         ohci_pmac_off(dev);
2687
2688         return 0;
2689 }
2690
2691 static int pci_resume(struct pci_dev *dev)
2692 {
2693         struct fw_ohci *ohci = pci_get_drvdata(dev);
2694         int err;
2695
2696         ohci_pmac_on(dev);
2697         pci_set_power_state(dev, PCI_D0);
2698         pci_restore_state(dev);
2699         err = pci_enable_device(dev);
2700         if (err) {
2701                 fw_error("pci_enable_device failed\n");
2702                 return err;
2703         }
2704
2705         return ohci_enable(&ohci->card, NULL, 0);
2706 }
2707 #endif
2708
2709 static const struct pci_device_id pci_table[] = {
2710         { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
2711         { }
2712 };
2713
2714 MODULE_DEVICE_TABLE(pci, pci_table);
2715
2716 static struct pci_driver fw_ohci_pci_driver = {
2717         .name           = ohci_driver_name,
2718         .id_table       = pci_table,
2719         .probe          = pci_probe,
2720         .remove         = pci_remove,
2721 #ifdef CONFIG_PM
2722         .resume         = pci_resume,
2723         .suspend        = pci_suspend,
2724 #endif
2725 };
2726
2727 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
2728 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
2729 MODULE_LICENSE("GPL");
2730
2731 /* Provide a module alias so root-on-sbp2 initrds don't break. */
2732 #ifndef CONFIG_IEEE1394_OHCI1394_MODULE
2733 MODULE_ALIAS("ohci1394");
2734 #endif
2735
2736 static int __init fw_ohci_init(void)
2737 {
2738         return pci_register_driver(&fw_ohci_pci_driver);
2739 }
2740
2741 static void __exit fw_ohci_cleanup(void)
2742 {
2743         pci_unregister_driver(&fw_ohci_pci_driver);
2744 }
2745
2746 module_init(fw_ohci_init);
2747 module_exit(fw_ohci_cleanup);