/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
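
/*
 * Illustrative note (not driver code): a descriptor's control word is built
 * by OR-ing the DESCRIPTOR_* values above.  For example, an AR buffer is
 * programmed as an INPUT_MORE descriptor that writes back status and always
 * branches, which is what ar_context_add_page() below sets up:
 *
 *	d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
 *				 DESCRIPTOR_STATUS |
 *				 DESCRIPTOR_BRANCH_ALWAYS);
 */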
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;

	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
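
/*
 * Illustrative sketch: each DMA context has a small block of registers, and
 * the macros above give the offsets of ContextControlSet, ContextControlClear,
 * CommandPtr and ContextMatch within that block.  Starting a context then
 * looks roughly like this (ctx->regs holds the base offset of the block):
 *
 *	reg_write(ohci, COMMAND_PTR(ctx->regs), descriptor_bus | z);
 *	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 *
 * The low four bits of CommandPtr carry Z, the number of descriptors in the
 * first descriptor block; compare ar_context_run() and context_run() below.
 */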
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};
struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
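
/*
 * Illustrative sketch: the IT_HEADER_* macros above assemble the two header
 * quadlets of an isochronous transmit packet.  ohci_queue_iso_transmit()
 * below builds them essentially as
 *
 *	header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
 *				IT_HEADER_TAG(p->tag) |
 *				IT_HEADER_TCODE(TCODE_STREAM_DATA) |
 *				IT_HEADER_CHANNEL(ctx->base.channel) |
 *				IT_HEADER_SPEED(ctx->base.speed));
 *	header[1] = cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
 *						      p->payload_length));
 */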
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};
#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	atomic_t bus_seconds;

	bool use_dualbuffer;
	bool old_uninorth;
	bool bus_reset_packet_quirk;
	bool iso_cycle_timer_quirk;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");
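
/*
 * Example usage (illustrative only, assuming the module is built as
 * firewire-ohci): the flags above can be OR-ed together when loading the
 * module or changed later through sysfs, e.g.
 *
 *	modprobe firewire-ohci debug=3      # AT/AR events plus self-IDs
 *	echo -1 > /sys/module/firewire_ohci/parameters/debug   # everything
 */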
static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete    ? " selfID"            : "",
	    evt & OHCI1394_RQPkt             ? " AR_req"            : "",
	    evt & OHCI1394_RSPkt             ? " AR_resp"           : "",
	    evt & OHCI1394_reqTxComplete     ? " AT_req"            : "",
	    evt & OHCI1394_respTxComplete    ? " AT_resp"           : "",
	    evt & OHCI1394_isochRx           ? " IR"                : "",
	    evt & OHCI1394_isochTx           ? " IT"                : "",
	    evt & OHCI1394_postedWriteErr    ? " postedWriteErr"    : "",
	    evt & OHCI1394_cycleTooLong      ? " cycleTooLong"      : "",
	    evt & OHCI1394_cycle64Seconds    ? " cycle64Seconds"    : "",
	    evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
	    evt & OHCI1394_regAccessFail     ? " regAccessFail"     : "",
	    evt & OHCI1394_busReset          ? " busReset"          : "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
					     ? " ?"                 : "");
}
static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};
374 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
376 int tcode = header[0] >> 4 & 0xf;
379 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
382 if (unlikely(evt >= ARRAY_SIZE(evts)))
385 if (evt == OHCI1394_evt_bus_reset) {
386 fw_notify("A%c evt_bus_reset, generation %d\n",
387 dir, (header[2] >> 16) & 0xff);
391 if (header[0] == ~header[1]) {
392 fw_notify("A%c %s, %s, %08x\n",
393 dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
398 case 0x0: case 0x6: case 0x8:
399 snprintf(specific, sizeof(specific), " = %08x",
400 be32_to_cpu((__force __be32)header[3]));
402 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
403 snprintf(specific, sizeof(specific), " %x,%x",
404 header[3] >> 16, header[3] & 0xffff);
412 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
414 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
415 fw_notify("A%c spd %x tl %02x, "
418 dir, speed, header[0] >> 10 & 0x3f,
419 header[1] >> 16, header[0] >> 16, evts[evt],
420 tcodes[tcode], header[1] & 0xffff, header[2], specific);
423 fw_notify("A%c spd %x tl %02x, "
426 dir, speed, header[0] >> 10 & 0x3f,
427 header[1] >> 16, header[0] >> 16, evts[evt],
428 tcodes[tcode], specific);
#else

#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
456 static int ohci_update_phy_reg(struct fw_card *card, int addr,
457 int clear_bits, int set_bits)
459 struct fw_ohci *ohci = fw_ohci(card);
462 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
465 val = reg_read(ohci, OHCI1394_PhyControl);
466 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
467 fw_error("failed to set phy reg bits.\n");
471 old = OHCI1394_PhyControl_ReadData(val);
472 old = (old & ~clear_bits) | set_bits;
473 reg_write(ohci, OHCI1394_PhyControl,
474 OHCI1394_PhyControl_Write(addr, old));
479 static int ar_context_add_page(struct ar_context *ctx)
481 struct device *dev = ctx->ohci->card.device;
482 struct ar_buffer *ab;
483 dma_addr_t uninitialized_var(ab_bus);
486 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
491 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
492 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
494 DESCRIPTOR_BRANCH_ALWAYS);
495 offset = offsetof(struct ar_buffer, data);
496 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
497 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
498 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
499 ab->descriptor.branch_address = 0;
501 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
502 ctx->last_buffer->next = ab;
503 ctx->last_buffer = ab;
505 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
506 flush_writes(ctx->ohci);
511 static void ar_context_release(struct ar_context *ctx)
513 struct ar_buffer *ab, *ab_next;
517 for (ab = ctx->current_buffer; ab; ab = ab_next) {
519 offset = offsetof(struct ar_buffer, data);
520 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
521 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
533 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
535 struct fw_ohci *ohci = ctx->ohci;
537 u32 status, length, tcode;
540 p.header[0] = cond_le32_to_cpu(buffer[0]);
541 p.header[1] = cond_le32_to_cpu(buffer[1]);
542 p.header[2] = cond_le32_to_cpu(buffer[2]);
544 tcode = (p.header[0] >> 4) & 0x0f;
546 case TCODE_WRITE_QUADLET_REQUEST:
547 case TCODE_READ_QUADLET_RESPONSE:
548 p.header[3] = (__force __u32) buffer[3];
549 p.header_length = 16;
550 p.payload_length = 0;
553 case TCODE_READ_BLOCK_REQUEST :
554 p.header[3] = cond_le32_to_cpu(buffer[3]);
555 p.header_length = 16;
556 p.payload_length = 0;
559 case TCODE_WRITE_BLOCK_REQUEST:
560 case TCODE_READ_BLOCK_RESPONSE:
561 case TCODE_LOCK_REQUEST:
562 case TCODE_LOCK_RESPONSE:
563 p.header[3] = cond_le32_to_cpu(buffer[3]);
564 p.header_length = 16;
565 p.payload_length = p.header[3] >> 16;
568 case TCODE_WRITE_RESPONSE:
569 case TCODE_READ_QUADLET_REQUEST:
570 case OHCI_TCODE_PHY_PACKET:
571 p.header_length = 12;
572 p.payload_length = 0;
576 /* FIXME: Stop context, discard everything, and restart? */
578 p.payload_length = 0;
581 p.payload = (void *) buffer + p.header_length;
583 /* FIXME: What to do about evt_* errors? */
584 length = (p.header_length + p.payload_length + 3) / 4;
585 status = cond_le32_to_cpu(buffer[length]);
586 evt = (status >> 16) & 0x1f;
589 p.speed = (status >> 21) & 0x7;
590 p.timestamp = status & 0xffff;
591 p.generation = ohci->request_generation;
593 log_ar_at_event('R', p.speed, p.header, evt);
	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas, some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
608 if (evt == OHCI1394_evt_bus_reset) {
609 if (!ohci->bus_reset_packet_quirk)
610 ohci->request_generation = (p.header[2] >> 16) & 0xff;
611 } else if (ctx == &ohci->ar_request_ctx) {
612 fw_core_handle_request(&ohci->card, &p);
614 fw_core_handle_response(&ohci->card, &p);
617 return buffer + length + 1;
620 static void ar_context_tasklet(unsigned long data)
622 struct ar_context *ctx = (struct ar_context *)data;
623 struct fw_ohci *ohci = ctx->ohci;
624 struct ar_buffer *ab;
625 struct descriptor *d;
628 ab = ctx->current_buffer;
631 if (d->res_count == 0) {
632 size_t size, rest, offset;
633 dma_addr_t start_bus;
637 * This descriptor is finished and we may have a
638 * packet split across this and the next buffer. We
639 * reuse the page for reassembling the split packet.
642 offset = offsetof(struct ar_buffer, data);
644 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
648 size = buffer + PAGE_SIZE - ctx->pointer;
649 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
650 memmove(buffer, ctx->pointer, size);
651 memcpy(buffer + size, ab->data, rest);
652 ctx->current_buffer = ab;
653 ctx->pointer = (void *) ab->data + rest;
654 end = buffer + size + rest;
657 buffer = handle_ar_packet(ctx, buffer);
659 dma_free_coherent(ohci->card.device, PAGE_SIZE,
661 ar_context_add_page(ctx);
663 buffer = ctx->pointer;
665 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
668 buffer = handle_ar_packet(ctx, buffer);
672 static int ar_context_init(struct ar_context *ctx,
673 struct fw_ohci *ohci, u32 regs)
679 ctx->last_buffer = &ab;
680 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
682 ar_context_add_page(ctx);
683 ar_context_add_page(ctx);
684 ctx->current_buffer = ab.next;
685 ctx->pointer = ctx->current_buffer->data;
690 static void ar_context_run(struct ar_context *ctx)
692 struct ar_buffer *ab = ctx->current_buffer;
696 offset = offsetof(struct ar_buffer, data);
697 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
699 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
700 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
701 flush_writes(ctx->ohci);
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}
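
/*
 * Illustrative note: when z == 2 the first descriptor either branches itself
 * (b == 3) or is an immediate-key descriptor (key == 2) whose second slot
 * only carries inline header data, so the branch address lives in d[0].
 * Otherwise it lives in the last descriptor of the block, d[z - 1].  For
 * instance, an AT request block with a payload uses z == 3, and
 * find_branch_descriptor(d, 3) returns d + 2, the OUTPUT_LAST payload
 * descriptor.
 */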
718 static void context_tasklet(unsigned long data)
720 struct context *ctx = (struct context *) data;
721 struct descriptor *d, *last;
724 struct descriptor_buffer *desc;
726 desc = list_entry(ctx->buffer_list.next,
727 struct descriptor_buffer, list);
729 while (last->branch_address != 0) {
730 struct descriptor_buffer *old_desc = desc;
731 address = le32_to_cpu(last->branch_address);
735 /* If the branch address points to a buffer outside of the
736 * current buffer, advance to the next buffer. */
737 if (address < desc->buffer_bus ||
738 address >= desc->buffer_bus + desc->used)
739 desc = list_entry(desc->list.next,
740 struct descriptor_buffer, list);
741 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
742 last = find_branch_descriptor(d, z);
744 if (!ctx->callback(ctx, d, last))
747 if (old_desc != desc) {
748 /* If we've advanced to the next buffer, move the
749 * previous buffer to the free list. */
752 spin_lock_irqsave(&ctx->ohci->lock, flags);
753 list_move_tail(&old_desc->list, &ctx->buffer_list);
754 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
761 * Allocate a new buffer and add it to the list of free buffers for this
762 * context. Must be called with ohci->lock held.
764 static int context_add_buffer(struct context *ctx)
766 struct descriptor_buffer *desc;
767 dma_addr_t uninitialized_var(bus_addr);
771 * 16MB of descriptors should be far more than enough for any DMA
772 * program. This will catch run-away userspace or DoS attacks.
774 if (ctx->total_allocation >= 16*1024*1024)
777 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
778 &bus_addr, GFP_ATOMIC);
782 offset = (void *)&desc->buffer - (void *)desc;
783 desc->buffer_size = PAGE_SIZE - offset;
784 desc->buffer_bus = bus_addr + offset;
787 list_add_tail(&desc->list, &ctx->buffer_list);
788 ctx->total_allocation += PAGE_SIZE;
793 static int context_init(struct context *ctx, struct fw_ohci *ohci,
794 u32 regs, descriptor_callback_t callback)
798 ctx->total_allocation = 0;
800 INIT_LIST_HEAD(&ctx->buffer_list);
801 if (context_add_buffer(ctx) < 0)
804 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
805 struct descriptor_buffer, list);
807 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
808 ctx->callback = callback;
811 * We put a dummy descriptor in the buffer that has a NULL
812 * branch address and looks like it's been sent. That way we
813 * have a descriptor to append DMA programs to.
815 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
816 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
817 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
818 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
819 ctx->last = ctx->buffer_tail->buffer;
820 ctx->prev = ctx->buffer_tail->buffer;
825 static void context_release(struct context *ctx)
827 struct fw_card *card = &ctx->ohci->card;
828 struct descriptor_buffer *desc, *tmp;
830 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
831 dma_free_coherent(card->device, PAGE_SIZE, desc,
833 ((void *)&desc->buffer - (void *)desc));
836 /* Must be called with ohci->lock held */
837 static struct descriptor *context_get_descriptors(struct context *ctx,
838 int z, dma_addr_t *d_bus)
840 struct descriptor *d = NULL;
841 struct descriptor_buffer *desc = ctx->buffer_tail;
843 if (z * sizeof(*d) > desc->buffer_size)
846 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
847 /* No room for the descriptor in this buffer, so advance to the
850 if (desc->list.next == &ctx->buffer_list) {
851 /* If there is no free buffer next in the list,
853 if (context_add_buffer(ctx) < 0)
856 desc = list_entry(desc->list.next,
857 struct descriptor_buffer, list);
858 ctx->buffer_tail = desc;
861 d = desc->buffer + desc->used / sizeof(*d);
862 memset(d, 0, z * sizeof(*d));
863 *d_bus = desc->buffer_bus + desc->used;
868 static void context_run(struct context *ctx, u32 extra)
870 struct fw_ohci *ohci = ctx->ohci;
872 reg_write(ohci, COMMAND_PTR(ctx->regs),
873 le32_to_cpu(ctx->last->branch_address));
874 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
875 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
879 static void context_append(struct context *ctx,
880 struct descriptor *d, int z, int extra)
883 struct descriptor_buffer *desc = ctx->buffer_tail;
885 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
887 desc->used += (z + extra) * sizeof(*d);
888 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
889 ctx->prev = find_branch_descriptor(d, z);
891 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
892 flush_writes(ctx->ohci);
895 static void context_stop(struct context *ctx)
900 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
901 flush_writes(ctx->ohci);
903 for (i = 0; i < 10; i++) {
904 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
905 if ((reg & CONTEXT_ACTIVE) == 0)
910 fw_error("Error: DMA context still active (0x%08x)\n", reg);
struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
922 static int at_context_queue_packet(struct context *ctx,
923 struct fw_packet *packet)
925 struct fw_ohci *ohci = ctx->ohci;
926 dma_addr_t d_bus, uninitialized_var(payload_bus);
927 struct driver_data *driver_data;
928 struct descriptor *d, *last;
933 d = context_get_descriptors(ctx, 4, &d_bus);
935 packet->ack = RCODE_SEND_ERROR;
939 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
940 d[0].res_count = cpu_to_le16(packet->timestamp);
	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */
949 header = (__le32 *) &d[1];
950 switch (packet->header_length) {
953 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
954 (packet->speed << 16));
955 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
956 (packet->header[0] & 0xffff0000));
957 header[2] = cpu_to_le32(packet->header[2]);
959 tcode = (packet->header[0] >> 4) & 0x0f;
960 if (TCODE_IS_BLOCK_PACKET(tcode))
961 header[3] = cpu_to_le32(packet->header[3]);
963 header[3] = (__force __le32) packet->header[3];
965 d[0].req_count = cpu_to_le16(packet->header_length);
969 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
970 (packet->speed << 16));
971 header[1] = cpu_to_le32(packet->header[0]);
972 header[2] = cpu_to_le32(packet->header[1]);
973 d[0].req_count = cpu_to_le16(12);
977 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
978 (packet->speed << 16));
979 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
980 d[0].req_count = cpu_to_le16(8);
985 packet->ack = RCODE_SEND_ERROR;
989 driver_data = (struct driver_data *) &d[3];
990 driver_data->packet = packet;
991 packet->driver_data = driver_data;
993 if (packet->payload_length > 0) {
995 dma_map_single(ohci->card.device, packet->payload,
996 packet->payload_length, DMA_TO_DEVICE);
997 if (dma_mapping_error(ohci->card.device, payload_bus)) {
998 packet->ack = RCODE_SEND_ERROR;
1001 packet->payload_bus = payload_bus;
1002 packet->payload_mapped = true;
1004 d[2].req_count = cpu_to_le16(packet->payload_length);
1005 d[2].data_address = cpu_to_le32(payload_bus);
1013 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1014 DESCRIPTOR_IRQ_ALWAYS |
1015 DESCRIPTOR_BRANCH_ALWAYS);
1018 * If the controller and packet generations don't match, we need to
1019 * bail out and try again. If IntEvent.busReset is set, the AT context
1020 * is halted, so appending to the context and trying to run it is
1021 * futile. Most controllers do the right thing and just flush the AT
1022 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
1023 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
1024 * up stalling out. So we just bail out in software and try again
1025 * later, and everyone is happy.
1026 * FIXME: Document how the locking works.
1028 if (ohci->generation != packet->generation ||
1029 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
1030 if (packet->payload_mapped)
1031 dma_unmap_single(ohci->card.device, payload_bus,
1032 packet->payload_length, DMA_TO_DEVICE);
1033 packet->ack = RCODE_GENERATION;
1037 context_append(ctx, d, z, 4 - z);
1039 /* If the context isn't already running, start it up. */
1040 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1041 if ((reg & CONTEXT_RUN) == 0)
1042 context_run(ctx, 0);
1047 static int handle_at_packet(struct context *context,
1048 struct descriptor *d,
1049 struct descriptor *last)
1051 struct driver_data *driver_data;
1052 struct fw_packet *packet;
1053 struct fw_ohci *ohci = context->ohci;
1056 if (last->transfer_status == 0)
1057 /* This descriptor isn't done yet, stop iteration. */
1060 driver_data = (struct driver_data *) &d[3];
1061 packet = driver_data->packet;
1063 /* This packet was cancelled, just continue. */
1066 if (packet->payload_mapped)
1067 dma_unmap_single(ohci->card.device, packet->payload_bus,
1068 packet->payload_length, DMA_TO_DEVICE);
1070 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1071 packet->timestamp = le16_to_cpu(last->res_count);
1073 log_ar_at_event('T', packet->speed, packet->header, evt);
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;
1089 case OHCI1394_evt_missing_ack:
1091 * Using a valid (current) generation count, but the
1092 * node is not on the bus or not sending acks.
1094 packet->ack = RCODE_NO_ACK;
1097 case ACK_COMPLETE + 0x10:
1098 case ACK_PENDING + 0x10:
1099 case ACK_BUSY_X + 0x10:
1100 case ACK_BUSY_A + 0x10:
1101 case ACK_BUSY_B + 0x10:
1102 case ACK_DATA_ERROR + 0x10:
1103 case ACK_TYPE_ERROR + 0x10:
1104 packet->ack = evt - 0x10;
1108 packet->ack = RCODE_SEND_ERROR;
1112 packet->callback(packet, &ohci->card, packet->ack);
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
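
/*
 * Example (illustrative only): for an incoming block write request the
 * header quadlets decode as
 *
 *	destination = HEADER_GET_DESTINATION(packet->header[0]);
 *	tcode       = HEADER_GET_TCODE(packet->header[0]);
 *	offset      = ((u64)HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
 *		      packet->header[2];
 *	length      = HEADER_GET_DATA_LENGTH(packet->header[3]);
 *
 * which is how handle_local_request() and handle_local_rom() below extract
 * the CSR offset and payload length from a packet.
 */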
1123 static void handle_local_rom(struct fw_ohci *ohci,
1124 struct fw_packet *packet, u32 csr)
1126 struct fw_packet response;
1127 int tcode, length, i;
1129 tcode = HEADER_GET_TCODE(packet->header[0]);
1130 if (TCODE_IS_BLOCK_PACKET(tcode))
1131 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1135 i = csr - CSR_CONFIG_ROM;
1136 if (i + length > CONFIG_ROM_SIZE) {
1137 fw_fill_response(&response, packet->header,
1138 RCODE_ADDRESS_ERROR, NULL, 0);
1139 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1140 fw_fill_response(&response, packet->header,
1141 RCODE_TYPE_ERROR, NULL, 0);
1143 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1144 (void *) ohci->config_rom + i, length);
1147 fw_core_handle_response(&ohci->card, &response);
1150 static void handle_local_lock(struct fw_ohci *ohci,
1151 struct fw_packet *packet, u32 csr)
1153 struct fw_packet response;
1154 int tcode, length, ext_tcode, sel;
1155 __be32 *payload, lock_old;
1156 u32 lock_arg, lock_data;
1158 tcode = HEADER_GET_TCODE(packet->header[0]);
1159 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1160 payload = packet->payload;
1161 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1163 if (tcode == TCODE_LOCK_REQUEST &&
1164 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1165 lock_arg = be32_to_cpu(payload[0]);
1166 lock_data = be32_to_cpu(payload[1]);
1167 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1171 fw_fill_response(&response, packet->header,
1172 RCODE_TYPE_ERROR, NULL, 0);
1176 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1177 reg_write(ohci, OHCI1394_CSRData, lock_data);
1178 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1179 reg_write(ohci, OHCI1394_CSRControl, sel);
1181 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1182 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
1184 fw_notify("swap not done yet\n");
1186 fw_fill_response(&response, packet->header,
1187 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1189 fw_core_handle_response(&ohci->card, &response);
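
	/*
	 * Illustrative sketch of the register handshake used above (the
	 * local variable names are mine): the controller's CSR access unit
	 * performs the compare-and-swap for the bus management registers in
	 * hardware, roughly
	 *
	 *	reg_write(ohci, OHCI1394_CSRData, new_value);
	 *	reg_write(ohci, OHCI1394_CSRCompareData, expected_value);
	 *	reg_write(ohci, OHCI1394_CSRControl, sel);
	 *	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
	 *		old_value = reg_read(ohci, OHCI1394_CSRData);
	 *
	 * where bit 31 of CSRControl signals completion.  handle_local_lock()
	 * then returns that old value in the lock response, exactly as a
	 * remote node would see it.
	 */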
1192 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1197 if (ctx == &ctx->ohci->at_request_ctx) {
1198 packet->ack = ACK_PENDING;
1199 packet->callback(packet, &ctx->ohci->card, packet->ack);
1203 ((unsigned long long)
1204 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1206 csr = offset - CSR_REGISTER_BASE;
1208 /* Handle config rom reads. */
1209 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1210 handle_local_rom(ctx->ohci, packet, csr);
1212 case CSR_BUS_MANAGER_ID:
1213 case CSR_BANDWIDTH_AVAILABLE:
1214 case CSR_CHANNELS_AVAILABLE_HI:
1215 case CSR_CHANNELS_AVAILABLE_LO:
1216 handle_local_lock(ctx->ohci, packet, csr);
1219 if (ctx == &ctx->ohci->at_request_ctx)
1220 fw_core_handle_request(&ctx->ohci->card, packet);
1222 fw_core_handle_response(&ctx->ohci->card, packet);
1226 if (ctx == &ctx->ohci->at_response_ctx) {
1227 packet->ack = ACK_COMPLETE;
1228 packet->callback(packet, &ctx->ohci->card, packet->ack);
1232 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1234 unsigned long flags;
1237 spin_lock_irqsave(&ctx->ohci->lock, flags);
1239 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1240 ctx->ohci->generation == packet->generation) {
1241 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1242 handle_local_request(ctx, packet);
1246 ret = at_context_queue_packet(ctx, packet);
1247 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1250 packet->callback(packet, &ctx->ohci->card, packet->ack);
1254 static void bus_reset_tasklet(unsigned long data)
1256 struct fw_ohci *ohci = (struct fw_ohci *)data;
1257 int self_id_count, i, j, reg;
1258 int generation, new_generation;
1259 unsigned long flags;
1260 void *free_rom = NULL;
1261 dma_addr_t free_rom_bus = 0;
1263 reg = reg_read(ohci, OHCI1394_NodeID);
1264 if (!(reg & OHCI1394_NodeID_idValid)) {
1265 fw_notify("node ID not valid, new bus reset in progress\n");
1268 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("misconfigured bus\n");
1272 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1273 OHCI1394_NodeID_nodeNumber);
1275 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1276 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1277 fw_notify("inconsistent self IDs\n");
1281 * The count in the SelfIDCount register is the number of
1282 * bytes in the self ID receive buffer. Since we also receive
1283 * the inverted quadlets and a header quadlet, we shift one
1284 * bit extra to get the actual number of self IDs.
1286 self_id_count = (reg >> 3) & 0xff;
1287 if (self_id_count == 0 || self_id_count > 252) {
1288 fw_notify("inconsistent self IDs\n");
1291 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1294 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1295 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1296 fw_notify("inconsistent self IDs\n");
1299 ohci->self_id_buffer[j] =
1300 cond_le32_to_cpu(ohci->self_id_cpu[i]);
	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}
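
	/*
	 * A minimal sketch of the scheme described above (illustrative only;
	 * copy_self_ids() and discard() are stand-ins for the surrounding
	 * code): the generation field of SelfIDCount acts like a sequence
	 * counter, so the whole read is thrown away whenever it changed
	 * while the buffer was being copied.
	 *
	 *	gen_before = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	 *	copy_self_ids();
	 *	gen_after = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	 *	if (gen_after != gen_before)
	 *		discard();
	 */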
1325 /* FIXME: Document how the locking works. */
1326 spin_lock_irqsave(&ohci->lock, flags);
1328 ohci->generation = generation;
1329 context_stop(&ohci->at_request_ctx);
1330 context_stop(&ohci->at_response_ctx);
1331 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1333 if (ohci->bus_reset_packet_quirk)
1334 ohci->request_generation = generation;
	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
1345 if (ohci->next_config_rom != NULL) {
1346 if (ohci->next_config_rom != ohci->config_rom) {
1347 free_rom = ohci->config_rom;
1348 free_rom_bus = ohci->config_rom_bus;
1350 ohci->config_rom = ohci->next_config_rom;
1351 ohci->config_rom_bus = ohci->next_config_rom_bus;
1352 ohci->next_config_rom = NULL;
1355 * Restore config_rom image and manually update
1356 * config_rom registers. Writing the header quadlet
1357 * will indicate that the config rom is ready, so we
1360 reg_write(ohci, OHCI1394_BusOptions,
1361 be32_to_cpu(ohci->config_rom[2]));
1362 ohci->config_rom[0] = ohci->next_header;
1363 reg_write(ohci, OHCI1394_ConfigROMhdr,
1364 be32_to_cpu(ohci->next_header));
1367 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1368 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1369 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1372 spin_unlock_irqrestore(&ohci->lock, flags);
1375 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1376 free_rom, free_rom_bus);
1378 log_selfids(ohci->node_id, generation,
1379 self_id_count, ohci->self_id_buffer);
1381 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1382 self_id_count, ohci->self_id_buffer);
1385 static irqreturn_t irq_handler(int irq, void *data)
1387 struct fw_ohci *ohci = data;
1388 u32 event, iso_event, cycle_time;
1391 event = reg_read(ohci, OHCI1394_IntEventClear);
1393 if (!event || !~event)
1396 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
1397 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1400 if (event & OHCI1394_selfIDComplete)
1401 tasklet_schedule(&ohci->bus_reset_tasklet);
1403 if (event & OHCI1394_RQPkt)
1404 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1406 if (event & OHCI1394_RSPkt)
1407 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1409 if (event & OHCI1394_reqTxComplete)
1410 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1412 if (event & OHCI1394_respTxComplete)
1413 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1415 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1416 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1419 i = ffs(iso_event) - 1;
1420 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1421 iso_event &= ~(1 << i);
1424 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1425 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1428 i = ffs(iso_event) - 1;
1429 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1430 iso_event &= ~(1 << i);
1433 if (unlikely(event & OHCI1394_regAccessFail))
1434 fw_error("Register access failure - "
1435 "please notify linux1394-devel@lists.sf.net\n");
1437 if (unlikely(event & OHCI1394_postedWriteErr))
1438 fw_error("PCI posted write error\n");
1440 if (unlikely(event & OHCI1394_cycleTooLong)) {
1441 if (printk_ratelimit())
1442 fw_notify("isochronous cycle too long\n");
1443 reg_write(ohci, OHCI1394_LinkControlSet,
1444 OHCI1394_LinkControl_cycleMaster);
1447 if (unlikely(event & OHCI1394_cycleInconsistent)) {
1449 * We need to clear this event bit in order to make
1450 * cycleMatch isochronous I/O work. In theory we should
1451 * stop active cycleMatch iso contexts now and restart
1452 * them at least two cycles later. (FIXME?)
1454 if (printk_ratelimit())
1455 fw_notify("isochronous cycle inconsistent\n");
1458 if (event & OHCI1394_cycle64Seconds) {
1459 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1460 if ((cycle_time & 0x80000000) == 0)
1461 atomic_inc(&ohci->bus_seconds);
1467 static int software_reset(struct fw_ohci *ohci)
1471 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1473 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1474 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1475 OHCI1394_HCControl_softReset) == 0)
1483 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1485 size_t size = length * 4;
1487 memcpy(dest, src, size);
1488 if (size < CONFIG_ROM_SIZE)
1489 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1492 static int ohci_enable(struct fw_card *card,
1493 const __be32 *config_rom, size_t length)
1495 struct fw_ohci *ohci = fw_ohci(card);
1496 struct pci_dev *dev = to_pci_dev(card->device);
1500 if (software_reset(ohci)) {
1501 fw_error("Failed to reset ohci card.\n");
1506 * Now enable LPS, which we need in order to start accessing
1507 * most of the registers. In fact, on some cards (ALI M5251),
1508 * accessing registers in the SClk domain without LPS enabled
1509 * will lock up the machine. Wait 50msec to make sure we have
1510 * full link enabled. However, with some cards (well, at least
1511 * a JMicron PCIe card), we have to try again sometimes.
1513 reg_write(ohci, OHCI1394_HCControlSet,
1514 OHCI1394_HCControl_LPS |
1515 OHCI1394_HCControl_postedWriteEnable);
1518 for (lps = 0, i = 0; !lps && i < 3; i++) {
1520 lps = reg_read(ohci, OHCI1394_HCControlSet) &
1521 OHCI1394_HCControl_LPS;
1525 fw_error("Failed to set Link Power Status\n");
1529 reg_write(ohci, OHCI1394_HCControlClear,
1530 OHCI1394_HCControl_noByteSwapData);
1532 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1533 reg_write(ohci, OHCI1394_LinkControlClear,
1534 OHCI1394_LinkControl_rcvPhyPkt);
1535 reg_write(ohci, OHCI1394_LinkControlSet,
1536 OHCI1394_LinkControl_rcvSelfID |
1537 OHCI1394_LinkControl_cycleTimerEnable |
1538 OHCI1394_LinkControl_cycleMaster);
1540 reg_write(ohci, OHCI1394_ATRetries,
1541 OHCI1394_MAX_AT_REQ_RETRIES |
1542 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1543 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1545 ar_context_run(&ohci->ar_request_ctx);
1546 ar_context_run(&ohci->ar_response_ctx);
1548 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1549 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1550 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1551 reg_write(ohci, OHCI1394_IntMaskSet,
1552 OHCI1394_selfIDComplete |
1553 OHCI1394_RQPkt | OHCI1394_RSPkt |
1554 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1555 OHCI1394_isochRx | OHCI1394_isochTx |
1556 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1557 OHCI1394_cycleInconsistent |
1558 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1559 OHCI1394_masterIntEnable);
1560 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1561 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1563 /* Activate link_on bit and contender bit in our self ID packets.*/
1564 if (ohci_update_phy_reg(card, 4, 0,
1565 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
1569 * When the link is not yet enabled, the atomic config rom
1570 * update mechanism described below in ohci_set_config_rom()
1571 * is not active. We have to update ConfigRomHeader and
1572 * BusOptions manually, and the write to ConfigROMmap takes
1573 * effect immediately. We tie this to the enabling of the
1574 * link, so we have a valid config rom before enabling - the
1575 * OHCI requires that ConfigROMhdr and BusOptions have valid
1576 * values before enabling.
1578 * However, when the ConfigROMmap is written, some controllers
1579 * always read back quadlets 0 and 2 from the config rom to
1580 * the ConfigRomHeader and BusOptions registers on bus reset.
1581 * They shouldn't do that in this initial case where the link
1582 * isn't enabled. This means we have to use the same
1583 * workaround here, setting the bus header to 0 and then write
1584 * the right values in the bus reset tasklet.
1588 ohci->next_config_rom =
1589 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1590 &ohci->next_config_rom_bus,
1592 if (ohci->next_config_rom == NULL)
1595 copy_config_rom(ohci->next_config_rom, config_rom, length);
1598 * In the suspend case, config_rom is NULL, which
1599 * means that we just reuse the old config rom.
1601 ohci->next_config_rom = ohci->config_rom;
1602 ohci->next_config_rom_bus = ohci->config_rom_bus;
1605 ohci->next_header = ohci->next_config_rom[0];
1606 ohci->next_config_rom[0] = 0;
1607 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1608 reg_write(ohci, OHCI1394_BusOptions,
1609 be32_to_cpu(ohci->next_config_rom[2]));
1610 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1612 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1614 if (request_irq(dev->irq, irq_handler,
1615 IRQF_SHARED, ohci_driver_name, ohci)) {
1616 fw_error("Failed to allocate shared interrupt %d.\n",
1618 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1619 ohci->config_rom, ohci->config_rom_bus);
1623 reg_write(ohci, OHCI1394_HCControlSet,
1624 OHCI1394_HCControl_linkEnable |
1625 OHCI1394_HCControl_BIBimageValid);
1629 * We are ready to go, initiate bus reset to finish the
1633 fw_core_initiate_bus_reset(&ohci->card, 1);
1638 static int ohci_set_config_rom(struct fw_card *card,
1639 const __be32 *config_rom, size_t length)
1641 struct fw_ohci *ohci;
1642 unsigned long flags;
1644 __be32 *next_config_rom;
1645 dma_addr_t uninitialized_var(next_config_rom_bus);
1647 ohci = fw_ohci(card);
	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */
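
	/*
	 * A condensed sketch of the sequence described above (illustrative
	 * only; next_rom and next_rom_bus stand in for ohci->next_config_rom
	 * and its bus address):
	 *
	 *	next_rom[0] = 0;    (a "not ready" header quadlet)
	 *	copy the rest of the new image into next_rom
	 *	reg_write(ohci, OHCI1394_ConfigROMmap, next_rom_bus);
	 *	fw_core_initiate_bus_reset(&ohci->card, 1);
	 *
	 * bus_reset_tasklet() later writes the real header quadlet and
	 * BusOptions and frees the previous image.
	 */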
1677 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1678 &next_config_rom_bus, GFP_KERNEL);
1679 if (next_config_rom == NULL)
1682 spin_lock_irqsave(&ohci->lock, flags);
1684 if (ohci->next_config_rom == NULL) {
1685 ohci->next_config_rom = next_config_rom;
1686 ohci->next_config_rom_bus = next_config_rom_bus;
1688 copy_config_rom(ohci->next_config_rom, config_rom, length);
1690 ohci->next_header = config_rom[0];
1691 ohci->next_config_rom[0] = 0;
1693 reg_write(ohci, OHCI1394_ConfigROMmap,
1694 ohci->next_config_rom_bus);
1698 spin_unlock_irqrestore(&ohci->lock, flags);
1701 * Now initiate a bus reset to have the changes take
1702 * effect. We clean up the old config rom memory and DMA
1703 * mappings in the bus reset tasklet, since the OHCI
1704 * controller could need to access it before the bus reset
1708 fw_core_initiate_bus_reset(&ohci->card, 1);
1710 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1711 next_config_rom, next_config_rom_bus);
1716 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1718 struct fw_ohci *ohci = fw_ohci(card);
1720 at_context_transmit(&ohci->at_request_ctx, packet);
1723 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1725 struct fw_ohci *ohci = fw_ohci(card);
1727 at_context_transmit(&ohci->at_response_ctx, packet);
1730 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1732 struct fw_ohci *ohci = fw_ohci(card);
1733 struct context *ctx = &ohci->at_request_ctx;
1734 struct driver_data *driver_data = packet->driver_data;
1737 tasklet_disable(&ctx->tasklet);
1739 if (packet->ack != 0)
1742 if (packet->payload_mapped)
1743 dma_unmap_single(ohci->card.device, packet->payload_bus,
1744 packet->payload_length, DMA_TO_DEVICE);
1746 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1747 driver_data->packet = NULL;
1748 packet->ack = RCODE_CANCELLED;
1749 packet->callback(packet, &ohci->card, packet->ack);
1752 tasklet_enable(&ctx->tasklet);
1757 static int ohci_enable_phys_dma(struct fw_card *card,
1758 int node_id, int generation)
1760 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1763 struct fw_ohci *ohci = fw_ohci(card);
1764 unsigned long flags;
1768 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1769 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1772 spin_lock_irqsave(&ohci->lock, flags);
1774 if (ohci->generation != generation) {
1780 * Note, if the node ID contains a non-local bus ID, physical DMA is
1781 * enabled for _all_ nodes on remote buses.
1784 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1786 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1788 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1792 spin_unlock_irqrestore(&ohci->lock, flags);
1795 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
static inline u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
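
/*
 * Worked example (illustrative): the cycle timer register packs cycleSeconds
 * (bits 31-25), cycleCount (bits 24-12, 8000 cycles per second) and
 * cycleOffset (bits 11-0, 3072 ticks of the 24.576 MHz clock per cycle).
 * A raw value of 0x04001801 therefore converts to
 *
 *	  2 * 3072 * 8000    (cycleSeconds = 2)
 *	+       1 * 3072     (cycleCount = 1)
 *	+          0x801     (cycleOffset = 2049)
 *	= 49157121 ticks
 */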
1808 static u64 ohci_get_bus_time(struct fw_card *card)
1810 struct fw_ohci *ohci = fw_ohci(card);
1816 if (!ohci->iso_cycle_timer_quirk) {
1817 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1820 * Some controllers exhibit one or more of the following bugs
1821 * when updating the iso cycle timer register:
1822 * - When the lowest six bits are wrapping around to zero,
1823 * a read that happens at the same time will return garbage
1824 * in the lowest ten bits.
1825 * - When the cycleOffset field wraps around to zero, the
1826 * cycleCount field is not incremented for about 60 ns.
1827 * - Occasionally, the entire register reads zero.
1829 * To catch these, we read the register three times and ensure
1830 * that the difference between each two consecutive reads is
1831 * approximately the same, i.e., less than twice the other.
1832 * Furthermore, any negative difference indicates an error.
1833 * (A PCI read should take at least 20 ticks of the 24.576 MHz
1834 * timer to execute, so we have enough precision to compute the
1835 * ratio of the differences.)
1838 c0 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1839 c1 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1840 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1841 t0 = cycle_timer_ticks(c0);
1842 t1 = cycle_timer_ticks(c1);
1843 t2 = cycle_timer_ticks(c2);
1846 } while (diff01 <= 0 || diff12 <= 0 ||
1847 diff01 / diff12 >= 2 || diff12 / diff01 >= 2);
1849 bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | c2;
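
	/*
	 * Illustrative example of the filtering above: if three back-to-back
	 * reads convert to t0 = 1000, t1 = 3 and t2 = 1010 ticks (the middle
	 * read hit the register while the low bits were wrapping), then
	 * diff01 = t1 - t0 is negative and the loop retries.  A clean triple
	 * such as 1000/1005/1010 gives two small, similar positive
	 * differences and is accepted.
	 */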
1854 static void copy_iso_headers(struct iso_context *ctx, void *p)
1856 int i = ctx->header_length;
1858 if (i + ctx->base.header_size > PAGE_SIZE)
1862 * The iso header is byteswapped to little endian by
1863 * the controller, but the remaining header quadlets
1864 * are big endian. We want to present all the headers
1865 * as big endian, so we have to swap the first quadlet.
1867 if (ctx->base.header_size > 0)
1868 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1869 if (ctx->base.header_size > 4)
1870 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1871 if (ctx->base.header_size > 8)
1872 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1873 ctx->header_length += ctx->base.header_size;
1876 static int handle_ir_dualbuffer_packet(struct context *context,
1877 struct descriptor *d,
1878 struct descriptor *last)
1880 struct iso_context *ctx =
1881 container_of(context, struct iso_context, context);
1882 struct db_descriptor *db = (struct db_descriptor *) d;
1884 size_t header_length;
1887 if (db->first_res_count != 0 && db->second_res_count != 0) {
1888 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1889 /* This descriptor isn't done yet, stop iteration. */
1892 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1895 header_length = le16_to_cpu(db->first_req_count) -
1896 le16_to_cpu(db->first_res_count);
1899 end = p + header_length;
1901 copy_iso_headers(ctx, p);
1902 ctx->excess_bytes +=
1903 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1904 p += max(ctx->base.header_size, (size_t)8);
1907 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1908 le16_to_cpu(db->second_res_count);
1910 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1911 ir_header = (__le32 *) (db + 1);
1912 ctx->base.callback(&ctx->base,
1913 le32_to_cpu(ir_header[0]) & 0xffff,
1914 ctx->header_length, ctx->header,
1915 ctx->base.callback_data);
1916 ctx->header_length = 0;
1922 static int handle_ir_packet_per_buffer(struct context *context,
1923 struct descriptor *d,
1924 struct descriptor *last)
1926 struct iso_context *ctx =
1927 container_of(context, struct iso_context, context);
1928 struct descriptor *pd;
1932 for (pd = d; pd <= last; pd++) {
1933 if (pd->transfer_status)
1937 /* Descriptor(s) not done yet, stop iteration */
1941 copy_iso_headers(ctx, p);
1943 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1944 ir_header = (__le32 *) p;
1945 ctx->base.callback(&ctx->base,
1946 le32_to_cpu(ir_header[0]) & 0xffff,
1947 ctx->header_length, ctx->header,
1948 ctx->base.callback_data);
1949 ctx->header_length = 0;
1955 static int handle_it_packet(struct context *context,
1956 struct descriptor *d,
1957 struct descriptor *last)
1959 struct iso_context *ctx =
1960 container_of(context, struct iso_context, context);
1962 struct descriptor *pd;
1964 for (pd = d; pd <= last; pd++)
1965 if (pd->transfer_status)
1968 /* Descriptor(s) not done yet, stop iteration */
1971 i = ctx->header_length;
1972 if (i + 4 < PAGE_SIZE) {
1973 /* Present this value as big-endian to match the receive code */
1974 *(__be32 *)(ctx->header + i) = cpu_to_be32(
1975 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
1976 le16_to_cpu(pd->res_count));
1977 ctx->header_length += 4;
1979 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1980 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1981 ctx->header_length, ctx->header,
1982 ctx->base.callback_data);
1983 ctx->header_length = 0;
1988 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1989 int type, int channel, size_t header_size)
1991 struct fw_ohci *ohci = fw_ohci(card);
1992 struct iso_context *ctx, *list;
1993 descriptor_callback_t callback;
1994 u64 *channels, dont_care = ~0ULL;
1996 unsigned long flags;
1997 int index, ret = -ENOMEM;
1999 if (type == FW_ISO_CONTEXT_TRANSMIT) {
2000 channels = &dont_care;
2001 mask = &ohci->it_context_mask;
2002 list = ohci->it_context_list;
2003 callback = handle_it_packet;
2005 channels = &ohci->ir_context_channels;
2006 mask = &ohci->ir_context_mask;
2007 list = ohci->ir_context_list;
2008 if (ohci->use_dualbuffer)
2009 callback = handle_ir_dualbuffer_packet;
2011 callback = handle_ir_packet_per_buffer;
2014 spin_lock_irqsave(&ohci->lock, flags);
2015 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2017 *channels &= ~(1ULL << channel);
2018 *mask &= ~(1 << index);
2020 spin_unlock_irqrestore(&ohci->lock, flags);
2023 return ERR_PTR(-EBUSY);
2025 if (type == FW_ISO_CONTEXT_TRANSMIT)
2026 regs = OHCI1394_IsoXmitContextBase(index);
2028 regs = OHCI1394_IsoRcvContextBase(index);
2031 memset(ctx, 0, sizeof(*ctx));
2032 ctx->header_length = 0;
2033 ctx->header = (void *) __get_free_page(GFP_KERNEL);
2034 if (ctx->header == NULL)
2037 ret = context_init(&ctx->context, ohci, regs, callback);
2039 goto out_with_header;
2044 free_page((unsigned long)ctx->header);
2046 spin_lock_irqsave(&ohci->lock, flags);
2047 *mask |= 1 << index;
2048 spin_unlock_irqrestore(&ohci->lock, flags);
2050 return ERR_PTR(ret);
2053 static int ohci_start_iso(struct fw_iso_context *base,
2054 s32 cycle, u32 sync, u32 tags)
2056 struct iso_context *ctx = container_of(base, struct iso_context, base);
2057 struct fw_ohci *ohci = ctx->context.ohci;
2061 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2062 index = ctx - ohci->it_context_list;
2065 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
2066 (cycle & 0x7fff) << 16;
2068 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
2069 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
2070 context_run(&ctx->context, match);
2072 index = ctx - ohci->ir_context_list;
2073 control = IR_CONTEXT_ISOCH_HEADER;
2074 if (ohci->use_dualbuffer)
2075 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
2076 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2078 match |= (cycle & 0x07fff) << 12;
2079 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
2082 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
2083 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
2084 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
2085 context_run(&ctx->context, control);
static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}
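
/*
 * Queueing a transmit packet builds a small descriptor program: an
 * OUTPUT_MORE-immediate descriptor pair carrying the 8-byte isochronous
 * packet header (omitted for skip packets), an optional descriptor
 * pointing at a caller-supplied header copied in behind the program,
 * and one descriptor per page spanned by the payload.  The final
 * descriptor is flagged OUTPUT_LAST | STATUS | BRANCH_ALWAYS, plus
 * IRQ_ALWAYS if the packet asked for an interrupt.
 */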
static int ohci_queue_iso_transmit(struct fw_iso_context *base,
				   struct fw_iso_packet *packet,
				   struct fw_iso_buffer *buffer,
				   unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
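
/*
 * Dual-buffer receive: each dual-buffer descriptor splits an incoming
 * packet into a first buffer that receives the isochronous header(s)
 * and a second buffer that receives up to one page worth of payload.
 * The loop below emits one such descriptor per payload page.
 */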
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
					     struct fw_iso_packet *packet,
					     struct fw_iso_buffer *buffer,
					     unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;
	/*
	 * The controllers I've tested have not worked correctly when
	 * second_req_count is zero.  Rather than do something we know won't
	 * work, return an error
	 */
	if (rest == 0)
		return -EINVAL;

	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size =
			cpu_to_le16(max(ctx->base.header_size, (size_t)8));
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 0;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}
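
/*
 * Packet-per-buffer receive: for every expected packet, emit one
 * INPUT_MORE descriptor that receives the isochronous header plus
 * INPUT_MORE descriptors for the payload pages; the last payload
 * descriptor of each packet is turned into INPUT_LAST | BRANCH_ALWAYS,
 * with IRQ_ALWAYS added on the final packet if an interrupt was
 * requested.
 */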
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					struct fw_iso_packet *packet,
					struct fw_iso_buffer *buffer,
					unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *pd;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
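
/*
 * Queueing takes ohci->lock with interrupts disabled and dispatches to
 * the transmit, dual-buffer receive, or packet-per-buffer receive
 * variant above.
 */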
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->use_dualbuffer)
		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
							buffer, payload);
	else
		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
							buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}
static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
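
/*
 * Device IDs for the Agere FW643, needed only for the dual-buffer quirk
 * in pci_probe() below; Agere devices carry the old AT&T vendor ID.
 */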
#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
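
/*
 * Probe: map the MMIO window, read the OHCI version to decide whether
 * dual-buffer IR is usable, apply per-vendor quirk flags, set up the
 * AR/AT DMA contexts, size the iso context lists from the hardware's
 * interrupt mask registers, allocate the self-ID buffer, and finally
 * register the card.
 */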
static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;

	/* FIXME: make it a context option or remove dual-buffer mode */
	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;

	/* dual-buffer mode is broken if more than one IR context is active */
	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
		ohci->use_dualbuffer = false;

	/* dual-buffer mode is broken */
	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
		ohci->use_dualbuffer = false;

/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
	/* dual-buffer mode is broken with descriptor addresses above 2G */
	if (dev->vendor == PCI_VENDOR_ID_TI &&
	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
		ohci->use_dualbuffer = false;
#endif

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ohci->iso_cycle_timer_quirk = dev->vendor == PCI_VENDOR_ID_AL ||
				      dev->vendor == PCI_VENDOR_ID_NEC ||
				      dev->vendor == PCI_VENDOR_ID_VIA;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}
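
/*
 * Teardown undoes device setup in reverse: mask all interrupts, remove
 * the card from the core, reset the controller, free the IRQ and the
 * DMA buffers, and release the PCI resources.
 */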
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}
static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif
static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif
static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);