/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "fw-transaction.h"
44 #define DESCRIPTOR_OUTPUT_MORE 0
45 #define DESCRIPTOR_OUTPUT_LAST (1 << 12)
46 #define DESCRIPTOR_INPUT_MORE (2 << 12)
47 #define DESCRIPTOR_INPUT_LAST (3 << 12)
48 #define DESCRIPTOR_STATUS (1 << 11)
49 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
50 #define DESCRIPTOR_PING (1 << 7)
51 #define DESCRIPTOR_YY (1 << 6)
52 #define DESCRIPTOR_NO_IRQ (0 << 4)
53 #define DESCRIPTOR_IRQ_ERROR (1 << 4)
54 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
55 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
56 #define DESCRIPTOR_WAIT (3 << 0)
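/*
 * The DESCRIPTOR_* values above are bit fields of the 16-bit control word
 * of an OHCI DMA descriptor: command (output/input, more/last), status,
 * key, ping, interrupt policy, branch control and wait control, as laid
 * out in the OHCI 1394 descriptor format.
 */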
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 second_buffer;
	__le32 first_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
81 #define CONTROL_SET(regs) (regs)
82 #define CONTROL_CLEAR(regs) ((regs) + 4)
83 #define COMMAND_PTR(regs) ((regs) + 12)
84 #define CONTEXT_MATCH(regs) ((regs) + 16)
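/*
 * Each DMA context exposes a small register block; the macros above give
 * the offsets of ContextControlSet (+0), ContextControlClear (+4),
 * CommandPtr (+12) and ContextMatch (+16) relative to the context base.
 */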
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};
103 typedef int (*descriptor_callback_t)(struct context *ctx,
104 struct descriptor *d,
105 struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	descriptor_callback_t callback;
	struct tasklet_struct tasklet;
};
154 #define IT_HEADER_SY(v) ((v) << 0)
155 #define IT_HEADER_TCODE(v) ((v) << 4)
156 #define IT_HEADER_CHANNEL(v) ((v) << 8)
157 #define IT_HEADER_TAG(v) ((v) << 14)
158 #define IT_HEADER_SPEED(v) ((v) << 16)
159 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};
169 #define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;
	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;
	bool old_uninorth;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];
	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;
	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;
	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
215 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
216 #define IR_CONTEXT_BUFFER_FILL 0x80000000
217 #define IR_CONTEXT_ISOCH_HEADER 0x40000000
218 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
219 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
220 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
222 #define CONTEXT_RUN 0x8000
223 #define CONTEXT_WAKE 0x1000
224 #define CONTEXT_DEAD 0x0800
225 #define CONTEXT_ACTIVE 0x0400
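/*
 * The IT_/IR_CONTEXT_* values above select mode bits of an isochronous
 * context's ContextControl register; CONTEXT_RUN/WAKE/DEAD/ACTIVE are the
 * generic run/wake/dead/active bits shared by all DMA contexts.
 */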
227 #define OHCI1394_MAX_AT_REQ_RETRIES 0x2
228 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2
229 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
231 #define FW_OHCI_MAJOR 240
232 #define OHCI1394_REGISTER_SIZE 0x800
233 #define OHCI_LOOP_COUNT 500
234 #define OHCI1394_PCI_HCI_Control 0x40
235 #define SELF_ID_BUF_SIZE 0x800
236 #define OHCI_TCODE_PHY_PACKET 0x0e
237 #define OHCI_VERSION_1_1 0x010010
239 static char ohci_driver_name[] = KBUILD_MODNAME;
241 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
243 #define OHCI_PARAM_DEBUG_IRQS 1
244 #define OHCI_PARAM_DEBUG_SELFIDS 2
245 #define OHCI_PARAM_DEBUG_AT_AR 4
247 static int param_debug;
248 module_param_named(debug, param_debug, int, 0644);
249 MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
250 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
251 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
252 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
253 ", or a combination, or all = -1)");
255 static void log_irqs(u32 evt)
257 if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
260 printk(KERN_DEBUG KBUILD_MODNAME ": IRQ %08x%s%s%s%s%s%s%s%s%s%s%s\n",
262 evt & OHCI1394_selfIDComplete ? " selfID" : "",
263 evt & OHCI1394_RQPkt ? " AR_req" : "",
264 evt & OHCI1394_RSPkt ? " AR_resp" : "",
265 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
266 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
267 evt & OHCI1394_isochRx ? " IR" : "",
268 evt & OHCI1394_isochTx ? " IT" : "",
269 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
270 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
271 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
272 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
273 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
274 OHCI1394_respTxComplete | OHCI1394_isochRx |
275 OHCI1394_isochTx | OHCI1394_postedWriteErr |
276 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds)
280 static const char *speed[] = {
281 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
283 static const char *power[] = {
284 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
285 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
287 static const char port[] = { '.', '-', 'p', 'c', };
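/*
 * Self-ID port status codes: '.' = port not present, '-' = not connected,
 * 'p' = connected to parent, 'c' = connected to child.
 */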
289 static char _p(u32 *s, int shift)
291 return port[*s >> shift & 3];
294 static void log_selfids(int generation, int self_id_count, u32 *s)
296 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
299 printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d\n",
300 self_id_count, generation);
302 for (; self_id_count--; ++s)
303 if ((*s & 1 << 23) == 0)
304 printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
305 "%s gc=%d %s %s%s%s\n",
306 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
307 speed[*s >> 14 & 3], *s >> 16 & 63,
308 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
309 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
311 printk(KERN_DEBUG "selfID n: %08x, phy %d "
312 "[%c%c%c%c%c%c%c%c]\n",
314 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
315 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
318 static const char *evts[] = {
319 [0x00] = "evt_no_status", [0x01] = "-reserved-",
320 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
321 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
322 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
323 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
324 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
325 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
326 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
327 [0x10] = "-reserved-", [0x11] = "ack_complete",
328 [0x12] = "ack_pending ", [0x13] = "-reserved-",
329 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
330 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
331 [0x18] = "-reserved-", [0x19] = "-reserved-",
332 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
333 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
334 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
335 [0x20] = "pending/cancelled",
337 static const char *tcodes[] = {
338 [0x0] = "QW req", [0x1] = "BW req",
339 [0x2] = "W resp", [0x3] = "-reserved-",
340 [0x4] = "QR req", [0x5] = "BR req",
341 [0x6] = "QR resp", [0x7] = "BR resp",
342 [0x8] = "cycle start", [0x9] = "Lk req",
343 [0xa] = "async stream packet", [0xb] = "Lk resp",
344 [0xc] = "-reserved-", [0xd] = "-reserved-",
345 [0xe] = "link internal", [0xf] = "-reserved-",
347 static const char *phys[] = {
348 [0x0] = "phy config packet", [0x1] = "link-on packet",
349 [0x2] = "self-id packet", [0x3] = "-reserved-",
352 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
354 int tcode = header[0] >> 4 & 0xf;
357 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
360 if (unlikely(evt >= ARRAY_SIZE(evts)))
363 if (header[0] == ~header[1]) {
364 printk(KERN_DEBUG "A%c %s, %s, %08x\n",
365 dir, evts[evt], phys[header[0] >> 30 & 0x3],
371 case 0x0: case 0x6: case 0x8:
372 snprintf(specific, sizeof(specific), " = %08x",
373 be32_to_cpu((__force __be32)header[3]));
375 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
376 snprintf(specific, sizeof(specific), " %x,%x",
377 header[3] >> 16, header[3] & 0xffff);
385 printk(KERN_DEBUG "A%c %s, %s\n",
386 dir, evts[evt], tcodes[tcode]);
388 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
389 printk(KERN_DEBUG "A%c spd %x tl %02x, "
392 dir, speed, header[0] >> 10 & 0x3f,
393 header[1] >> 16, header[0] >> 16, evts[evt],
394 tcodes[tcode], header[1] & 0xffff, header[2], specific);
397 printk(KERN_DEBUG "A%c spd %x tl %02x, "
400 dir, speed, header[0] >> 10 & 0x3f,
401 header[1] >> 16, header[0] >> 16, evts[evt],
402 tcodes[tcode], specific);
408 #define log_irqs(evt)
409 #define log_selfids(generation, self_id_count, sid)
410 #define log_ar_at_event(dir, speed, header, evt)
412 #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
414 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
416 writel(data, ohci->registers + offset);
419 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
421 return readl(ohci->registers + offset);
424 static inline void flush_writes(const struct fw_ohci *ohci)
426 /* Do a dummy read to flush writes. */
427 reg_read(ohci, OHCI1394_Version);
431 ohci_update_phy_reg(struct fw_card *card, int addr,
432 int clear_bits, int set_bits)
434 struct fw_ohci *ohci = fw_ohci(card);
437 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
440 val = reg_read(ohci, OHCI1394_PhyControl);
441 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
442 fw_error("failed to set phy reg bits.\n");
446 old = OHCI1394_PhyControl_ReadData(val);
447 old = (old & ~clear_bits) | set_bits;
448 reg_write(ohci, OHCI1394_PhyControl,
449 OHCI1394_PhyControl_Write(addr, old));
454 static int ar_context_add_page(struct ar_context *ctx)
456 struct device *dev = ctx->ohci->card.device;
457 struct ar_buffer *ab;
458 dma_addr_t uninitialized_var(ab_bus);
461 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
465 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
466 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
468 DESCRIPTOR_BRANCH_ALWAYS);
469 offset = offsetof(struct ar_buffer, data);
470 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
471 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
472 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
473 ab->descriptor.branch_address = 0;
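	/*
	 * Hook the new buffer onto the end of the list.  The low four bits
	 * of an OHCI branch address encode the Z value, i.e. the number of
	 * descriptors found at that address (one here).
	 */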
475 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
476 ctx->last_buffer->next = ab;
477 ctx->last_buffer = ab;
479 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
480 flush_writes(ctx->ohci);
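/*
 * On the old Apple UniNorth controllers, data in the self-ID and AR buffers
 * apparently arrives in host (big-endian) byte order rather than the
 * little-endian order the OHCI spec calls for, hence the conditional
 * byteswap below.
 */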
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
492 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
494 struct fw_ohci *ohci = ctx->ohci;
496 u32 status, length, tcode;
498 p.header[0] = cond_le32_to_cpu(buffer[0]);
499 p.header[1] = cond_le32_to_cpu(buffer[1]);
500 p.header[2] = cond_le32_to_cpu(buffer[2]);
502 tcode = (p.header[0] >> 4) & 0x0f;
504 case TCODE_WRITE_QUADLET_REQUEST:
505 case TCODE_READ_QUADLET_RESPONSE:
506 p.header[3] = (__force __u32) buffer[3];
507 p.header_length = 16;
508 p.payload_length = 0;
511 case TCODE_READ_BLOCK_REQUEST :
512 p.header[3] = cond_le32_to_cpu(buffer[3]);
513 p.header_length = 16;
514 p.payload_length = 0;
517 case TCODE_WRITE_BLOCK_REQUEST:
518 case TCODE_READ_BLOCK_RESPONSE:
519 case TCODE_LOCK_REQUEST:
520 case TCODE_LOCK_RESPONSE:
521 p.header[3] = cond_le32_to_cpu(buffer[3]);
522 p.header_length = 16;
523 p.payload_length = p.header[3] >> 16;
526 case TCODE_WRITE_RESPONSE:
527 case TCODE_READ_QUADLET_REQUEST:
528 case OHCI_TCODE_PHY_PACKET:
529 p.header_length = 12;
530 p.payload_length = 0;
534 p.payload = (void *) buffer + p.header_length;
536 /* FIXME: What to do about evt_* errors? */
537 length = (p.header_length + p.payload_length + 3) / 4;
538 status = cond_le32_to_cpu(buffer[length]);
540 p.ack = ((status >> 16) & 0x1f) - 16;
541 p.speed = (status >> 21) & 0x7;
542 p.timestamp = status & 0xffff;
543 p.generation = ohci->request_generation;
545 log_ar_at_event('R', p.speed, p.header, status >> 16 & 0x1f);
	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */
557 if (p.ack + 16 == 0x09)
558 ohci->request_generation = (p.header[2] >> 16) & 0xff;
559 else if (ctx == &ohci->ar_request_ctx)
560 fw_core_handle_request(&ohci->card, &p);
562 fw_core_handle_response(&ohci->card, &p);
564 return buffer + length + 1;
567 static void ar_context_tasklet(unsigned long data)
569 struct ar_context *ctx = (struct ar_context *)data;
570 struct fw_ohci *ohci = ctx->ohci;
571 struct ar_buffer *ab;
572 struct descriptor *d;
575 ab = ctx->current_buffer;
578 if (d->res_count == 0) {
579 size_t size, rest, offset;
580 dma_addr_t start_bus;
584 * This descriptor is finished and we may have a
585 * packet split across this and the next buffer. We
586 * reuse the page for reassembling the split packet.
589 offset = offsetof(struct ar_buffer, data);
591 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
595 size = buffer + PAGE_SIZE - ctx->pointer;
596 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
597 memmove(buffer, ctx->pointer, size);
598 memcpy(buffer + size, ab->data, rest);
599 ctx->current_buffer = ab;
600 ctx->pointer = (void *) ab->data + rest;
601 end = buffer + size + rest;
604 buffer = handle_ar_packet(ctx, buffer);
606 dma_free_coherent(ohci->card.device, PAGE_SIZE,
608 ar_context_add_page(ctx);
610 buffer = ctx->pointer;
612 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
615 buffer = handle_ar_packet(ctx, buffer);
620 ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
626 ctx->last_buffer = &ab;
627 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
629 ar_context_add_page(ctx);
630 ar_context_add_page(ctx);
631 ctx->current_buffer = ab.next;
632 ctx->pointer = ctx->current_buffer->data;
637 static void ar_context_run(struct ar_context *ctx)
639 struct ar_buffer *ab = ctx->current_buffer;
643 offset = offsetof(struct ar_buffer, data);
644 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
646 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
647 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
648 flush_writes(ctx->ohci);
static struct descriptor *
find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;
	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}
666 static void context_tasklet(unsigned long data)
668 struct context *ctx = (struct context *) data;
669 struct descriptor *d, *last;
672 struct descriptor_buffer *desc;
674 desc = list_entry(ctx->buffer_list.next,
675 struct descriptor_buffer, list);
677 while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
685 if (address < desc->buffer_bus ||
686 address >= desc->buffer_bus + desc->used)
687 desc = list_entry(desc->list.next,
688 struct descriptor_buffer, list);
689 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
690 last = find_branch_descriptor(d, z);
692 if (!ctx->callback(ctx, d, last))
695 if (old_desc != desc) {
696 /* If we've advanced to the next buffer, move the
697 * previous buffer to the free list. */
700 spin_lock_irqsave(&ctx->ohci->lock, flags);
701 list_move_tail(&old_desc->list, &ctx->buffer_list);
702 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
709 * Allocate a new buffer and add it to the list of free buffers for this
710 * context. Must be called with ohci->lock held.
713 context_add_buffer(struct context *ctx)
715 struct descriptor_buffer *desc;
716 dma_addr_t uninitialized_var(bus_addr);
720 * 16MB of descriptors should be far more than enough for any DMA
721 * program. This will catch run-away userspace or DoS attacks.
723 if (ctx->total_allocation >= 16*1024*1024)
726 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
727 &bus_addr, GFP_ATOMIC);
731 offset = (void *)&desc->buffer - (void *)desc;
732 desc->buffer_size = PAGE_SIZE - offset;
733 desc->buffer_bus = bus_addr + offset;
736 list_add_tail(&desc->list, &ctx->buffer_list);
737 ctx->total_allocation += PAGE_SIZE;
743 context_init(struct context *ctx, struct fw_ohci *ohci,
744 u32 regs, descriptor_callback_t callback)
748 ctx->total_allocation = 0;
750 INIT_LIST_HEAD(&ctx->buffer_list);
751 if (context_add_buffer(ctx) < 0)
754 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
755 struct descriptor_buffer, list);
757 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
758 ctx->callback = callback;
761 * We put a dummy descriptor in the buffer that has a NULL
762 * branch address and looks like it's been sent. That way we
763 * have a descriptor to append DMA programs to.
765 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
766 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
767 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
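	/*
	 * 0x8011 makes the dummy descriptor look already completed: bit 15
	 * mirrors the context's "run" bit and the low five bits hold the
	 * ack_complete event code (0x11).
	 */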
768 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
769 ctx->last = ctx->buffer_tail->buffer;
770 ctx->prev = ctx->buffer_tail->buffer;
776 context_release(struct context *ctx)
778 struct fw_card *card = &ctx->ohci->card;
779 struct descriptor_buffer *desc, *tmp;
781 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
782 dma_free_coherent(card->device, PAGE_SIZE, desc,
784 ((void *)&desc->buffer - (void *)desc));
787 /* Must be called with ohci->lock held */
788 static struct descriptor *
789 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
791 struct descriptor *d = NULL;
792 struct descriptor_buffer *desc = ctx->buffer_tail;
794 if (z * sizeof(*d) > desc->buffer_size)
797 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
798 /* No room for the descriptor in this buffer, so advance to the
801 if (desc->list.next == &ctx->buffer_list) {
802 /* If there is no free buffer next in the list,
804 if (context_add_buffer(ctx) < 0)
807 desc = list_entry(desc->list.next,
808 struct descriptor_buffer, list);
809 ctx->buffer_tail = desc;
812 d = desc->buffer + desc->used / sizeof(*d);
813 memset(d, 0, z * sizeof(*d));
814 *d_bus = desc->buffer_bus + desc->used;
819 static void context_run(struct context *ctx, u32 extra)
821 struct fw_ohci *ohci = ctx->ohci;
823 reg_write(ohci, COMMAND_PTR(ctx->regs),
824 le32_to_cpu(ctx->last->branch_address));
825 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
826 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
830 static void context_append(struct context *ctx,
831 struct descriptor *d, int z, int extra)
834 struct descriptor_buffer *desc = ctx->buffer_tail;
836 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
838 desc->used += (z + extra) * sizeof(*d);
839 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
840 ctx->prev = find_branch_descriptor(d, z);
842 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
843 flush_writes(ctx->ohci);
846 static void context_stop(struct context *ctx)
851 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
852 flush_writes(ctx->ohci);
854 for (i = 0; i < 10; i++) {
855 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
856 if ((reg & CONTEXT_ACTIVE) == 0)
859 fw_notify("context_stop: still active (0x%08x)\n", reg);
struct driver_data {
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
876 struct fw_ohci *ohci = ctx->ohci;
877 dma_addr_t d_bus, uninitialized_var(payload_bus);
878 struct driver_data *driver_data;
879 struct descriptor *d, *last;
884 d = context_get_descriptors(ctx, 4, &d_bus);
886 packet->ack = RCODE_SEND_ERROR;
890 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
891 d[0].res_count = cpu_to_le16(packet->timestamp);
	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */
900 header = (__le32 *) &d[1];
901 if (packet->header_length > 8) {
902 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
903 (packet->speed << 16));
904 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
905 (packet->header[0] & 0xffff0000));
906 header[2] = cpu_to_le32(packet->header[2]);
908 tcode = (packet->header[0] >> 4) & 0x0f;
909 if (TCODE_IS_BLOCK_PACKET(tcode))
910 header[3] = cpu_to_le32(packet->header[3]);
912 header[3] = (__force __le32) packet->header[3];
914 d[0].req_count = cpu_to_le16(packet->header_length);
916 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
917 (packet->speed << 16));
918 header[1] = cpu_to_le32(packet->header[0]);
919 header[2] = cpu_to_le32(packet->header[1]);
920 d[0].req_count = cpu_to_le16(12);
923 driver_data = (struct driver_data *) &d[3];
924 driver_data->packet = packet;
925 packet->driver_data = driver_data;
927 if (packet->payload_length > 0) {
929 dma_map_single(ohci->card.device, packet->payload,
930 packet->payload_length, DMA_TO_DEVICE);
931 if (dma_mapping_error(payload_bus)) {
932 packet->ack = RCODE_SEND_ERROR;
936 d[2].req_count = cpu_to_le16(packet->payload_length);
937 d[2].data_address = cpu_to_le32(payload_bus);
945 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
946 DESCRIPTOR_IRQ_ALWAYS |
947 DESCRIPTOR_BRANCH_ALWAYS);
949 /* FIXME: Document how the locking works. */
950 if (ohci->generation != packet->generation) {
951 if (packet->payload_length > 0)
952 dma_unmap_single(ohci->card.device, payload_bus,
953 packet->payload_length, DMA_TO_DEVICE);
954 packet->ack = RCODE_GENERATION;
958 context_append(ctx, d, z, 4 - z);
960 /* If the context isn't already running, start it up. */
961 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
962 if ((reg & CONTEXT_RUN) == 0)
968 static int handle_at_packet(struct context *context,
969 struct descriptor *d,
970 struct descriptor *last)
972 struct driver_data *driver_data;
973 struct fw_packet *packet;
974 struct fw_ohci *ohci = context->ohci;
975 dma_addr_t payload_bus;
978 if (last->transfer_status == 0)
979 /* This descriptor isn't done yet, stop iteration. */
982 driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;
988 payload_bus = le32_to_cpu(last->data_address);
989 if (payload_bus != 0)
990 dma_unmap_single(ohci->card.device, payload_bus,
991 packet->payload_length, DMA_TO_DEVICE);
993 evt = le16_to_cpu(last->transfer_status) & 0x1f;
994 packet->timestamp = le16_to_cpu(last->res_count);
996 log_ar_at_event('T', packet->speed, packet->header, evt);
999 case OHCI1394_evt_timeout:
1000 /* Async response transmit timed out. */
1001 packet->ack = RCODE_CANCELLED;
1004 case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same error
		 * as when we try to use a stale generation count.
		 */
1009 packet->ack = RCODE_GENERATION;
1012 case OHCI1394_evt_missing_ack:
1014 * Using a valid (current) generation count, but the
1015 * node is not on the bus or not sending acks.
1017 packet->ack = RCODE_NO_ACK;
1020 case ACK_COMPLETE + 0x10:
1021 case ACK_PENDING + 0x10:
1022 case ACK_BUSY_X + 0x10:
1023 case ACK_BUSY_A + 0x10:
1024 case ACK_BUSY_B + 0x10:
1025 case ACK_DATA_ERROR + 0x10:
1026 case ACK_TYPE_ERROR + 0x10:
1027 packet->ack = evt - 0x10;
1031 packet->ack = RCODE_SEND_ERROR;
1035 packet->callback(packet, &ohci->card, packet->ack);
1040 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1041 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1042 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1043 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1044 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
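/*
 * The HEADER_GET_* helpers above pick fields out of the IEEE 1394 request
 * header quadlets: destination ID and tcode from quadlet 0, offset high
 * from quadlet 1, data length and extended tcode from quadlet 3.
 */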
1047 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1049 struct fw_packet response;
1050 int tcode, length, i;
1052 tcode = HEADER_GET_TCODE(packet->header[0]);
1053 if (TCODE_IS_BLOCK_PACKET(tcode))
1054 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1058 i = csr - CSR_CONFIG_ROM;
1059 if (i + length > CONFIG_ROM_SIZE) {
1060 fw_fill_response(&response, packet->header,
1061 RCODE_ADDRESS_ERROR, NULL, 0);
1062 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1063 fw_fill_response(&response, packet->header,
1064 RCODE_TYPE_ERROR, NULL, 0);
1066 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1067 (void *) ohci->config_rom + i, length);
1070 fw_core_handle_response(&ohci->card, &response);
1074 handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1076 struct fw_packet response;
1077 int tcode, length, ext_tcode, sel;
1078 __be32 *payload, lock_old;
1079 u32 lock_arg, lock_data;
1081 tcode = HEADER_GET_TCODE(packet->header[0]);
1082 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1083 payload = packet->payload;
1084 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1086 if (tcode == TCODE_LOCK_REQUEST &&
1087 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1088 lock_arg = be32_to_cpu(payload[0]);
1089 lock_data = be32_to_cpu(payload[1]);
1090 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1094 fw_fill_response(&response, packet->header,
1095 RCODE_TYPE_ERROR, NULL, 0);
1099 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1100 reg_write(ohci, OHCI1394_CSRData, lock_data);
1101 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1102 reg_write(ohci, OHCI1394_CSRControl, sel);
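	/*
	 * The compare-swap itself is carried out by the controller's CSR
	 * unit: we load the data and compare values, kick it off with the
	 * register selector, and then poll CSRControl for completion.
	 */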
1104 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1105 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
1107 fw_notify("swap not done yet\n");
1109 fw_fill_response(&response, packet->header,
1110 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1112 fw_core_handle_response(&ohci->card, &response);
1116 handle_local_request(struct context *ctx, struct fw_packet *packet)
1121 if (ctx == &ctx->ohci->at_request_ctx) {
1122 packet->ack = ACK_PENDING;
1123 packet->callback(packet, &ctx->ohci->card, packet->ack);
1127 ((unsigned long long)
1128 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1130 csr = offset - CSR_REGISTER_BASE;
1132 /* Handle config rom reads. */
1133 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1134 handle_local_rom(ctx->ohci, packet, csr);
1136 case CSR_BUS_MANAGER_ID:
1137 case CSR_BANDWIDTH_AVAILABLE:
1138 case CSR_CHANNELS_AVAILABLE_HI:
1139 case CSR_CHANNELS_AVAILABLE_LO:
1140 handle_local_lock(ctx->ohci, packet, csr);
1143 if (ctx == &ctx->ohci->at_request_ctx)
1144 fw_core_handle_request(&ctx->ohci->card, packet);
1146 fw_core_handle_response(&ctx->ohci->card, packet);
1150 if (ctx == &ctx->ohci->at_response_ctx) {
1151 packet->ack = ACK_COMPLETE;
1152 packet->callback(packet, &ctx->ohci->card, packet->ack);
1157 at_context_transmit(struct context *ctx, struct fw_packet *packet)
1159 unsigned long flags;
1162 spin_lock_irqsave(&ctx->ohci->lock, flags);
1164 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1165 ctx->ohci->generation == packet->generation) {
1166 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1167 handle_local_request(ctx, packet);
1171 retval = at_context_queue_packet(ctx, packet);
1172 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1175 packet->callback(packet, &ctx->ohci->card, packet->ack);
1179 static void bus_reset_tasklet(unsigned long data)
1181 struct fw_ohci *ohci = (struct fw_ohci *)data;
1182 int self_id_count, i, j, reg;
1183 int generation, new_generation;
1184 unsigned long flags;
1185 void *free_rom = NULL;
1186 dma_addr_t free_rom_bus = 0;
1188 reg = reg_read(ohci, OHCI1394_NodeID);
1189 if (!(reg & OHCI1394_NodeID_idValid)) {
1190 fw_notify("node ID not valid, new bus reset in progress\n");
1193 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1194 fw_notify("malconfigured bus\n");
1197 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1198 OHCI1394_NodeID_nodeNumber);
1200 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1201 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1202 fw_notify("inconsistent self IDs\n");
1206 * The count in the SelfIDCount register is the number of
1207 * bytes in the self ID receive buffer. Since we also receive
1208 * the inverted quadlets and a header quadlet, we shift one
1209 * bit extra to get the actual number of self IDs.
1211 self_id_count = (reg >> 3) & 0x3ff;
1212 if (self_id_count == 0) {
1213 fw_notify("inconsistent self IDs\n");
1216 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1219 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1220 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1221 fw_notify("inconsistent self IDs\n");
1224 ohci->self_id_buffer[j] =
1225 cond_le32_to_cpu(ohci->self_id_cpu[i]);
	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */
1243 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1244 if (new_generation != generation) {
1245 fw_notify("recursive bus reset detected, "
1246 "discarding self ids\n");
1250 /* FIXME: Document how the locking works. */
1251 spin_lock_irqsave(&ohci->lock, flags);
1253 ohci->generation = generation;
1254 context_stop(&ohci->at_request_ctx);
1255 context_stop(&ohci->at_response_ctx);
1256 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
1267 if (ohci->next_config_rom != NULL) {
1268 if (ohci->next_config_rom != ohci->config_rom) {
1269 free_rom = ohci->config_rom;
1270 free_rom_bus = ohci->config_rom_bus;
1272 ohci->config_rom = ohci->next_config_rom;
1273 ohci->config_rom_bus = ohci->next_config_rom_bus;
1274 ohci->next_config_rom = NULL;
1277 * Restore config_rom image and manually update
1278 * config_rom registers. Writing the header quadlet
1279 * will indicate that the config rom is ready, so we
1282 reg_write(ohci, OHCI1394_BusOptions,
1283 be32_to_cpu(ohci->config_rom[2]));
1284 ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
1285 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
1288 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1289 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1290 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1293 spin_unlock_irqrestore(&ohci->lock, flags);
1296 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1297 free_rom, free_rom_bus);
1299 log_selfids(generation, self_id_count, ohci->self_id_buffer);
1301 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1302 self_id_count, ohci->self_id_buffer);
1305 static irqreturn_t irq_handler(int irq, void *data)
1307 struct fw_ohci *ohci = data;
1308 u32 event, iso_event, cycle_time;
1311 event = reg_read(ohci, OHCI1394_IntEventClear);
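	/*
	 * An all-ones read most likely means the card has been unplugged or
	 * the PCI read failed; a zero read means the interrupt isn't ours.
	 */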
	if (!event || !~event)
		return IRQ_NONE;
1316 reg_write(ohci, OHCI1394_IntEventClear, event);
1319 if (event & OHCI1394_selfIDComplete)
1320 tasklet_schedule(&ohci->bus_reset_tasklet);
1322 if (event & OHCI1394_RQPkt)
1323 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1325 if (event & OHCI1394_RSPkt)
1326 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1328 if (event & OHCI1394_reqTxComplete)
1329 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1331 if (event & OHCI1394_respTxComplete)
1332 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1334 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1335 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1338 i = ffs(iso_event) - 1;
1339 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1340 iso_event &= ~(1 << i);
1343 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1344 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1347 i = ffs(iso_event) - 1;
1348 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1349 iso_event &= ~(1 << i);
1352 if (unlikely(event & OHCI1394_postedWriteErr))
1353 fw_error("PCI posted write error\n");
1355 if (unlikely(event & OHCI1394_cycleTooLong)) {
1356 if (printk_ratelimit())
1357 fw_notify("isochronous cycle too long\n");
1358 reg_write(ohci, OHCI1394_LinkControlSet,
1359 OHCI1394_LinkControl_cycleMaster);
1362 if (event & OHCI1394_cycle64Seconds) {
1363 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1364 if ((cycle_time & 0x80000000) == 0)
1365 ohci->bus_seconds++;
1371 static int software_reset(struct fw_ohci *ohci)
1375 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1377 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1378 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1379 OHCI1394_HCControl_softReset) == 0)
1387 static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1389 struct fw_ohci *ohci = fw_ohci(card);
1390 struct pci_dev *dev = to_pci_dev(card->device);
1392 if (software_reset(ohci)) {
1393 fw_error("Failed to reset ohci card.\n");
1398 * Now enable LPS, which we need in order to start accessing
1399 * most of the registers. In fact, on some cards (ALI M5251),
1400 * accessing registers in the SClk domain without LPS enabled
1401 * will lock up the machine. Wait 50msec to make sure we have
1402 * full link enabled.
1404 reg_write(ohci, OHCI1394_HCControlSet,
1405 OHCI1394_HCControl_LPS |
1406 OHCI1394_HCControl_postedWriteEnable);
1410 reg_write(ohci, OHCI1394_HCControlClear,
1411 OHCI1394_HCControl_noByteSwapData);
1413 reg_write(ohci, OHCI1394_LinkControlSet,
1414 OHCI1394_LinkControl_rcvSelfID |
1415 OHCI1394_LinkControl_cycleTimerEnable |
1416 OHCI1394_LinkControl_cycleMaster);
1418 reg_write(ohci, OHCI1394_ATRetries,
1419 OHCI1394_MAX_AT_REQ_RETRIES |
1420 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1421 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1423 ar_context_run(&ohci->ar_request_ctx);
1424 ar_context_run(&ohci->ar_response_ctx);
1426 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1427 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1428 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1429 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1430 reg_write(ohci, OHCI1394_IntMaskSet,
1431 OHCI1394_selfIDComplete |
1432 OHCI1394_RQPkt | OHCI1394_RSPkt |
1433 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1434 OHCI1394_isochRx | OHCI1394_isochTx |
1435 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1436 OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
1438 /* Activate link_on bit and contender bit in our self ID packets.*/
1439 if (ohci_update_phy_reg(card, 4, 0,
1440 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
1444 * When the link is not yet enabled, the atomic config rom
1445 * update mechanism described below in ohci_set_config_rom()
1446 * is not active. We have to update ConfigRomHeader and
1447 * BusOptions manually, and the write to ConfigROMmap takes
1448 * effect immediately. We tie this to the enabling of the
1449 * link, so we have a valid config rom before enabling - the
1450 * OHCI requires that ConfigROMhdr and BusOptions have valid
1451 * values before enabling.
1453 * However, when the ConfigROMmap is written, some controllers
1454 * always read back quadlets 0 and 2 from the config rom to
1455 * the ConfigRomHeader and BusOptions registers on bus reset.
1456 * They shouldn't do that in this initial case where the link
1457 * isn't enabled. This means we have to use the same
1458 * workaround here, setting the bus header to 0 and then write
1459 * the right values in the bus reset tasklet.
1463 ohci->next_config_rom =
1464 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1465 &ohci->next_config_rom_bus,
1467 if (ohci->next_config_rom == NULL)
1470 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1471 fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1474 * In the suspend case, config_rom is NULL, which
1475 * means that we just reuse the old config rom.
1477 ohci->next_config_rom = ohci->config_rom;
1478 ohci->next_config_rom_bus = ohci->config_rom_bus;
1481 ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
1482 ohci->next_config_rom[0] = 0;
1483 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1484 reg_write(ohci, OHCI1394_BusOptions,
1485 be32_to_cpu(ohci->next_config_rom[2]));
1486 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1488 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1490 if (request_irq(dev->irq, irq_handler,
1491 IRQF_SHARED, ohci_driver_name, ohci)) {
1492 fw_error("Failed to allocate shared interrupt %d.\n",
1494 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1495 ohci->config_rom, ohci->config_rom_bus);
1499 reg_write(ohci, OHCI1394_HCControlSet,
1500 OHCI1394_HCControl_linkEnable |
1501 OHCI1394_HCControl_BIBimageValid);
1505 * We are ready to go, initiate bus reset to finish the
1509 fw_core_initiate_bus_reset(&ohci->card, 1);
1515 ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1517 struct fw_ohci *ohci;
1518 unsigned long flags;
1519 int retval = -EBUSY;
1520 __be32 *next_config_rom;
1521 dma_addr_t uninitialized_var(next_config_rom_bus);
1523 ohci = fw_ohci(card);
1526 * When the OHCI controller is enabled, the config rom update
1527 * mechanism is a bit tricky, but easy enough to use. See
1528 * section 5.5.6 in the OHCI specification.
1530 * The OHCI controller caches the new config rom address in a
1531 * shadow register (ConfigROMmapNext) and needs a bus reset
1532 * for the changes to take place. When the bus reset is
1533 * detected, the controller loads the new values for the
1534 * ConfigRomHeader and BusOptions registers from the specified
1535 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1536 * shadow register. All automatically and atomically.
1538 * Now, there's a twist to this story. The automatic load of
1539 * ConfigRomHeader and BusOptions doesn't honor the
1540 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
1543 * architectures. The workaround we use is to put a 0 in the
1544 * header quadlet; 0 is endian agnostic and means that the
1545 * config rom isn't ready yet. In the bus reset tasklet we
1546 * then set up the real values for the two registers.
1548 * We use ohci->lock to avoid racing with the code that sets
1549 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1553 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1554 &next_config_rom_bus, GFP_KERNEL);
1555 if (next_config_rom == NULL)
1558 spin_lock_irqsave(&ohci->lock, flags);
1560 if (ohci->next_config_rom == NULL) {
1561 ohci->next_config_rom = next_config_rom;
1562 ohci->next_config_rom_bus = next_config_rom_bus;
1564 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1565 fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
1568 ohci->next_header = config_rom[0];
1569 ohci->next_config_rom[0] = 0;
1571 reg_write(ohci, OHCI1394_ConfigROMmap,
1572 ohci->next_config_rom_bus);
1576 spin_unlock_irqrestore(&ohci->lock, flags);
1579 * Now initiate a bus reset to have the changes take
1580 * effect. We clean up the old config rom memory and DMA
1581 * mappings in the bus reset tasklet, since the OHCI
1582 * controller could need to access it before the bus reset
1586 fw_core_initiate_bus_reset(&ohci->card, 1);
1588 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1589 next_config_rom, next_config_rom_bus);
1594 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1596 struct fw_ohci *ohci = fw_ohci(card);
1598 at_context_transmit(&ohci->at_request_ctx, packet);
1601 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1603 struct fw_ohci *ohci = fw_ohci(card);
1605 at_context_transmit(&ohci->at_response_ctx, packet);
1608 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1610 struct fw_ohci *ohci = fw_ohci(card);
1611 struct context *ctx = &ohci->at_request_ctx;
1612 struct driver_data *driver_data = packet->driver_data;
1613 int retval = -ENOENT;
1615 tasklet_disable(&ctx->tasklet);
1617 if (packet->ack != 0)
1620 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1621 driver_data->packet = NULL;
1622 packet->ack = RCODE_CANCELLED;
1623 packet->callback(packet, &ohci->card, packet->ack);
1627 tasklet_enable(&ctx->tasklet);
1633 ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1635 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1638 struct fw_ohci *ohci = fw_ohci(card);
1639 unsigned long flags;
1643 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1644 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1647 spin_lock_irqsave(&ohci->lock, flags);
1649 if (ohci->generation != generation) {
1655 * Note, if the node ID contains a non-local bus ID, physical DMA is
1656 * enabled for _all_ nodes on remote buses.
1659 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1661 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1663 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1667 spin_unlock_irqrestore(&ohci->lock, flags);
1669 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1673 ohci_get_bus_time(struct fw_card *card)
1675 struct fw_ohci *ohci = fw_ohci(card);
1679 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1680 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
1685 static int handle_ir_dualbuffer_packet(struct context *context,
1686 struct descriptor *d,
1687 struct descriptor *last)
1689 struct iso_context *ctx =
1690 container_of(context, struct iso_context, context);
1691 struct db_descriptor *db = (struct db_descriptor *) d;
1693 size_t header_length;
1697 if (db->first_res_count != 0 && db->second_res_count != 0) {
1698 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1699 /* This descriptor isn't done yet, stop iteration. */
1702 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1705 header_length = le16_to_cpu(db->first_req_count) -
1706 le16_to_cpu(db->first_res_count);
1708 i = ctx->header_length;
1710 end = p + header_length;
1711 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
1713 * The iso header is byteswapped to little endian by
1714 * the controller, but the remaining header quadlets
1715 * are big endian. We want to present all the headers
1716 * as big endian, so we have to swap the first
1719 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1720 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1721 i += ctx->base.header_size;
1722 ctx->excess_bytes +=
1723 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1724 p += ctx->base.header_size + 4;
1726 ctx->header_length = i;
1728 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1729 le16_to_cpu(db->second_res_count);
1731 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1732 ir_header = (__le32 *) (db + 1);
1733 ctx->base.callback(&ctx->base,
1734 le32_to_cpu(ir_header[0]) & 0xffff,
1735 ctx->header_length, ctx->header,
1736 ctx->base.callback_data);
1737 ctx->header_length = 0;
1743 static int handle_ir_packet_per_buffer(struct context *context,
1744 struct descriptor *d,
1745 struct descriptor *last)
1747 struct iso_context *ctx =
1748 container_of(context, struct iso_context, context);
1749 struct descriptor *pd;
1754 for (pd = d; pd <= last; pd++) {
1755 if (pd->transfer_status)
1759 /* Descriptor(s) not done yet, stop iteration */
1762 i = ctx->header_length;
1765 if (ctx->base.header_size > 0 &&
1766 i + ctx->base.header_size <= PAGE_SIZE) {
1768 * The iso header is byteswapped to little endian by
1769 * the controller, but the remaining header quadlets
1770 * are big endian. We want to present all the headers
1771 * as big endian, so we have to swap the first quadlet.
1773 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1774 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1775 ctx->header_length += ctx->base.header_size;
1778 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1779 ir_header = (__le32 *) p;
1780 ctx->base.callback(&ctx->base,
1781 le32_to_cpu(ir_header[0]) & 0xffff,
1782 ctx->header_length, ctx->header,
1783 ctx->base.callback_data);
1784 ctx->header_length = 0;
1790 static int handle_it_packet(struct context *context,
1791 struct descriptor *d,
1792 struct descriptor *last)
1794 struct iso_context *ctx =
1795 container_of(context, struct iso_context, context);
1797 if (last->transfer_status == 0)
1798 /* This descriptor isn't done yet, stop iteration. */
1801 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
1802 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1803 0, NULL, ctx->base.callback_data);
1808 static struct fw_iso_context *
1809 ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1811 struct fw_ohci *ohci = fw_ohci(card);
1812 struct iso_context *ctx, *list;
1813 descriptor_callback_t callback;
1815 unsigned long flags;
1816 int index, retval = -ENOMEM;
1818 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1819 mask = &ohci->it_context_mask;
1820 list = ohci->it_context_list;
1821 callback = handle_it_packet;
1823 mask = &ohci->ir_context_mask;
1824 list = ohci->ir_context_list;
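		/*
		 * Dual-buffer receive mode only exists in OHCI 1.1 and
		 * later controllers; older ones get packet-per-buffer mode.
		 */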
1825 if (ohci->version >= OHCI_VERSION_1_1)
1826 callback = handle_ir_dualbuffer_packet;
1828 callback = handle_ir_packet_per_buffer;
1831 spin_lock_irqsave(&ohci->lock, flags);
1832 index = ffs(*mask) - 1;
1834 *mask &= ~(1 << index);
1835 spin_unlock_irqrestore(&ohci->lock, flags);
1838 return ERR_PTR(-EBUSY);
1840 if (type == FW_ISO_CONTEXT_TRANSMIT)
1841 regs = OHCI1394_IsoXmitContextBase(index);
1843 regs = OHCI1394_IsoRcvContextBase(index);
1846 memset(ctx, 0, sizeof(*ctx));
1847 ctx->header_length = 0;
1848 ctx->header = (void *) __get_free_page(GFP_KERNEL);
1849 if (ctx->header == NULL)
1852 retval = context_init(&ctx->context, ohci, regs, callback);
1854 goto out_with_header;
1859 free_page((unsigned long)ctx->header);
1861 spin_lock_irqsave(&ohci->lock, flags);
1862 *mask |= 1 << index;
1863 spin_unlock_irqrestore(&ohci->lock, flags);
1865 return ERR_PTR(retval);
1868 static int ohci_start_iso(struct fw_iso_context *base,
1869 s32 cycle, u32 sync, u32 tags)
1871 struct iso_context *ctx = container_of(base, struct iso_context, base);
1872 struct fw_ohci *ohci = ctx->context.ohci;
1876 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1877 index = ctx - ohci->it_context_list;
1880 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1881 (cycle & 0x7fff) << 16;
1883 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1884 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1885 context_run(&ctx->context, match);
1887 index = ctx - ohci->ir_context_list;
1888 control = IR_CONTEXT_ISOCH_HEADER;
1889 if (ohci->version >= OHCI_VERSION_1_1)
1890 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
1891 match = (tags << 28) | (sync << 8) | ctx->base.channel;
1893 match |= (cycle & 0x07fff) << 12;
1894 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
1897 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
1898 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
1899 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
1900 context_run(&ctx->context, control);
1906 static int ohci_stop_iso(struct fw_iso_context *base)
1908 struct fw_ohci *ohci = fw_ohci(base->card);
1909 struct iso_context *ctx = container_of(base, struct iso_context, base);
1912 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1913 index = ctx - ohci->it_context_list;
1914 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
1916 index = ctx - ohci->ir_context_list;
1917 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
1920 context_stop(&ctx->context);
1925 static void ohci_free_iso_context(struct fw_iso_context *base)
1927 struct fw_ohci *ohci = fw_ohci(base->card);
1928 struct iso_context *ctx = container_of(base, struct iso_context, base);
1929 unsigned long flags;
1932 ohci_stop_iso(base);
1933 context_release(&ctx->context);
1934 free_page((unsigned long)ctx->header);
1936 spin_lock_irqsave(&ohci->lock, flags);
1938 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1939 index = ctx - ohci->it_context_list;
1940 ohci->it_context_mask |= 1 << index;
1942 index = ctx - ohci->ir_context_list;
1943 ohci->ir_context_mask |= 1 << index;
1946 spin_unlock_irqrestore(&ohci->lock, flags);
1950 ohci_queue_iso_transmit(struct fw_iso_context *base,
1951 struct fw_iso_packet *packet,
1952 struct fw_iso_buffer *buffer,
1953 unsigned long payload)
1955 struct iso_context *ctx = container_of(base, struct iso_context, base);
1956 struct descriptor *d, *last, *pd;
1957 struct fw_iso_packet *p;
1959 dma_addr_t d_bus, page_bus;
1960 u32 z, header_z, payload_z, irq;
1961 u32 payload_index, payload_end_index, next_page_index;
1962 int page, end_page, i, length, offset;
1965 * FIXME: Cycle lost behavior should be configurable: lose
1966 * packet, retransmit or terminate..
1970 payload_index = payload;
1976 if (p->header_length > 0)
1979 /* Determine the first page the payload isn't contained in. */
1980 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
1981 if (p->payload_length > 0)
1982 payload_z = end_page - (payload_index >> PAGE_SHIFT);
1988 /* Get header size in number of descriptors. */
1989 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
1991 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
1996 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1997 d[0].req_count = cpu_to_le16(8);
1999 header = (__le32 *) &d[1];
2000 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
2001 IT_HEADER_TAG(p->tag) |
2002 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
2003 IT_HEADER_CHANNEL(ctx->base.channel) |
2004 IT_HEADER_SPEED(ctx->base.speed));
2006 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
2007 p->payload_length));
2010 if (p->header_length > 0) {
2011 d[2].req_count = cpu_to_le16(p->header_length);
2012 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
2013 memcpy(&d[z], p->header, p->header_length);
2016 pd = d + z - payload_z;
2017 payload_end_index = payload_index + p->payload_length;
2018 for (i = 0; i < payload_z; i++) {
2019 page = payload_index >> PAGE_SHIFT;
2020 offset = payload_index & ~PAGE_MASK;
2021 next_page_index = (page + 1) << PAGE_SHIFT;
2023 min(next_page_index, payload_end_index) - payload_index;
2024 pd[i].req_count = cpu_to_le16(length);
2026 page_bus = page_private(buffer->pages[page]);
2027 pd[i].data_address = cpu_to_le32(page_bus + offset);
2029 payload_index += length;
	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;
2037 last = z == 2 ? d : d + z - 1;
2038 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
2040 DESCRIPTOR_BRANCH_ALWAYS |
2043 context_append(&ctx->context, d, z, header_z);
2049 ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2050 struct fw_iso_packet *packet,
2051 struct fw_iso_buffer *buffer,
2052 unsigned long payload)
2054 struct iso_context *ctx = container_of(base, struct iso_context, base);
2055 struct db_descriptor *db = NULL;
2056 struct descriptor *d;
2057 struct fw_iso_packet *p;
2058 dma_addr_t d_bus, page_bus;
2059 u32 z, header_z, length, rest;
2060 int page, offset, packet_count, header_size;
2063 * FIXME: Cycle lost behavior should be configurable: lose
2064 * packet, retransmit or terminate..
2071 * The OHCI controller puts the status word in the header
2072 * buffer too, so we need 4 extra bytes per packet.
2074 packet_count = p->header_length / ctx->base.header_size;
2075 header_size = packet_count * (ctx->base.header_size + 4);
2077 /* Get header size in number of descriptors. */
2078 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2079 page = payload >> PAGE_SHIFT;
2080 offset = payload & ~PAGE_MASK;
2081 rest = p->payload_length;
2083 /* FIXME: make packet-per-buffer/dual-buffer a context option */
2085 d = context_get_descriptors(&ctx->context,
2086 z + header_z, &d_bus);
2090 db = (struct db_descriptor *) d;
2091 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2092 DESCRIPTOR_BRANCH_ALWAYS);
2093 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
2094 if (p->skip && rest == p->payload_length) {
2095 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2096 db->first_req_count = db->first_size;
2098 db->first_req_count = cpu_to_le16(header_size);
2100 db->first_res_count = db->first_req_count;
2101 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
2103 if (p->skip && rest == p->payload_length)
2105 else if (offset + rest < PAGE_SIZE)
2108 length = PAGE_SIZE - offset;
2110 db->second_req_count = cpu_to_le16(length);
2111 db->second_res_count = db->second_req_count;
2112 page_bus = page_private(buffer->pages[page]);
2113 db->second_buffer = cpu_to_le32(page_bus + offset);
2115 if (p->interrupt && length == rest)
2116 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2118 context_append(&ctx->context, d, z, header_z);
2119 offset = (offset + length) & ~PAGE_MASK;
static int
ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					 struct fw_iso_packet *packet,
					 struct fw_iso_buffer *buffer,
					 unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL, *pd = NULL;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the status word in the
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = ctx->base.header_size + 4;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			pd = d + j;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
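
/*
 * Entry point for queueing isochronous packets; dispatches to the
 * transmit path or to one of the two receive implementations under
 * the controller lock.
 */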
static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
							   buffer, payload);
	else
		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
								  buffer,
								  payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return retval;
}

static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
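
/*
 * On PowerMacs the FireWire cable power and the PHY/link cell have to
 * be switched on and off through platform feature calls.
 */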
#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
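
/*
 * Probe: map the MMIO registers, set up the asynchronous request and
 * response DMA contexts, discover the isochronous contexts and register
 * the card with the core.
 */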
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed;
	u64 guid;
	int err;
	size_t size;

	ohci_pmac_on(dev);

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware.\n");
		goto fail_put_card;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
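
	/*
	 * Determine the number of available isochronous contexts:
	 * setting all bits in the interrupt mask and reading the
	 * register back tells us which context bits the hardware
	 * actually implements.
	 */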
	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		err = -ENOMEM;
		goto fail_registers;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		err = -ENOMEM;
		goto fail_registers;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err < 0)
		goto fail_self_id;

	ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_registers:
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_put_card:
	fw_card_put(&ohci->card);
	ohci_pmac_off(dev);

	return err;
}
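
/*
 * Teardown is roughly the reverse of probe; the card is detached from
 * the core first so that no new requests can be queued against the
 * hardware.
 */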
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	fw_card_put(&ohci->card);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif
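
/* Match any PCI device with the OHCI 1394 programming interface class code. */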
static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);