2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
 * . Various tips for optimization and functionality
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
/* Verbose debugging turns on the DBGMSG() trace macro below */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#ifdef OHCI1394_DEBUG
/* Per-card debug trace: tags output with driver name and host id.
 * NOTE(review): relies on a variable named `ohci` being in scope at
 * every call site. */
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
/* no-op variant when verbose debugging is disabled */
#define DBGMSG(fmt, args...)
/* Debug aid: count outstanding DMA mappings so leaks show up as a
 * non-zero balance between ALLOC and FREE traces. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
/* running balance of DMA allocations minus frees (debug builds only) */
static int global_outstanding_dmas = 0;
/* no-op variants when DMA debugging is disabled */
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
/* phys_dma: when non-zero, physical DMA requests are allowed (0644 so
 * it can be toggled via sysfs at runtime). */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
169 static void dma_trm_tasklet(unsigned long data);
170 static void dma_trm_reset(struct dma_trm_ctx *d);
172 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
173 enum context_type type, int ctx, int num_desc,
174 int buf_size, int split_buf_size, int context_base);
175 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
176 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
178 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
179 enum context_type type, int ctx, int num_desc,
182 static void ohci1394_pci_remove(struct pci_dev *pdev);
184 #ifndef __LITTLE_ENDIAN
185 static unsigned hdr_sizes[] =
187 3, /* TCODE_WRITEQ */
188 4, /* TCODE_WRITEB */
189 3, /* TCODE_WRITE_RESPONSE */
193 3, /* TCODE_READQ_RESPONSE */
194 4, /* TCODE_READB_RESPONSE */
195 1, /* TCODE_CYCLE_START (???) */
196 4, /* TCODE_LOCK_REQUEST */
197 2, /* TCODE_ISO_DATA */
198 4, /* TCODE_LOCK_RESPONSE */
202 static inline void packet_swab(quadlet_t *data, int tcode)
204 size_t size = hdr_sizes[tcode];
206 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210 data[size] = swab32(data[size]);
213 /* Don't waste cycles on same sex byte swaps */
214 #define packet_swab(w,x)
215 #endif /* !LITTLE_ENDIAN */
217 /***********************************
218 * IEEE-1394 functionality section *
219 ***********************************/
/* Read PHY register @addr through the OHCI1394_PhyControl register.
 * Busy-polls up to OHCI_LOOP_COUNT times for the read-done bit and
 * logs a timeout if it never sets.  Returns the 8-bit register value.
 * NOTE(review): declarations of i, r and flags are not visible in this
 * chunk -- presumably declared at function top; confirm upstream. */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
	/* all PHY register accesses are serialized by phy_reg_lock */
	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 = read request; register address in bits 8+ */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* bit 31 signals that the read has completed */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	/* the read data lives in bits 16-23 of PhyControl */
	return (r & 0x00ff0000) >> 16;
/* Write @data to PHY register @addr through OHCI1394_PhyControl.
 * Busy-polls up to OHCI_LOOP_COUNT times for the write-pending bit
 * (0x00004000) to clear and logs a timeout otherwise. */
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
	/* all PHY register accesses are serialized by phy_reg_lock */
	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00004000 = write request; address in bits 8+, data in bits 0-7 */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		/* write-pending bit clears when the write has been taken */
		if (!(r & 0x00004000))

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
276 /* Or's our value into the current value */
277 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 old = get_phy_reg (ohci, addr);
283 set_phy_reg (ohci, addr, old);
/* Process the self-ID packets deposited in the self-ID DMA buffer after
 * a bus reset.  Validates the SelfIDCount status word, forces another
 * bus reset on reception errors (up to OHCI1394_MAX_SELF_ID_ERRORS),
 * and forwards each valid self-ID quadlet to the ieee1394 core.
 * NOTE(review): declarations of q0/q1/size and the non-byteswapped
 * branch are not visible in this chunk -- transcribed as-is. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);

	/* bit 31 = reception error; the generation field of the count
	 * register must match the one stored in the first buffer quadlet */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		"Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* PHY register 1: 0x40 sets IBR (initiate bus reset),
			 * cf. the devctl reset paths below */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
			"Too many errors on SelfID error reception, giving up!");

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* quadlet count from the size field, minus the count quadlet itself */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;

	if (ohci->selfid_swap) {
		q0 = le32_to_cpu(q[0]);
		q1 = le32_to_cpu(q[1]);

			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* bits 24-29 hold the sending node's phy id */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
			"SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);

	DBGMSG("SelfID complete");
/* Assert the controller's softReset bit and busy-wait (up to
 * OHCI_LOOP_COUNT reads) for the hardware to clear it again,
 * which indicates the reset has completed. */
static void ohci_soft_reset(struct ti_ohci *ohci) {
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* softReset self-clears once the reset is finished */
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))

	DBGMSG ("Soft reset finished");
/* Generate the dma receive prgs and start the context */
/* Builds one INPUT_MORE descriptor per receive buffer, linked into a
 * ring (last descriptor branches back to the first with Z=0), programs
 * the IR-specific registers for iso contexts, then points cmdPtr at the
 * first descriptor and sets the run bit. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
			/* last descriptor wraps to the first; Z=0 stops DMA there */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* bit 15 = run: start the context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
/* Initialize the dma transmit context */
/* Stops the context, resets its descriptor bookkeeping (all program
 * slots free, both packet lists empty) and, for iso contexts, enables
 * the matching IsoXmit interrupt.  The context is not started here; it
 * is started lazily by dma_trm_flush() when a packet is queued. */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* all descriptors free, no branch to patch, no queued packets */
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
/* Count the number of available iso contexts */
/* Writes all-ones to the context mask register and reads it back; the
 * hardware only latches bits for contexts it actually implements, so
 * the set bits in @tmp identify the supported contexts. */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
/* Global initialization */
/* Bring the controller to operational state: program bus options, node
 * id, link control, self-ID buffer, config ROM mapping, all four async
 * DMA contexts, interrupt masks, and finally enable the link.  Also
 * enables any connected PHY ports and sanity-checks the max packet
 * size read back from the serial EEPROM. */
static void ohci_initialize(struct ti_ohci *ohci)
	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	/* NOTE(review): enabling IRMC when hpsb_disable_irm is set looks
	 * inverted -- an else branch appears to be missing here; verify
	 * against upstream before relying on this path. */
	if (hpsb_disable_irm)
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	/* NOTE(review): both branches of this conditional appear without
	 * an else keyword in this chunk -- likely a missing else line. */
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_selfIDComplete |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* bring the link up */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	sprintf (irq_buf, "%d", ohci->dev->irq);
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {

		/* PHY reg 7 selects the port; reg 8 holds its status */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		set_phy_reg(ohci, 8, status & ~1);

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, " EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		       (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        check also that a single dma descriptor doesn't cross a
 *
 * Builds the OUTPUT_MORE/OUTPUT_LAST descriptor pair for @packet in
 * descriptor slot d->prg_ind, patches the previous descriptor's branch
 * address to chain it in, and appends the packet to d->fifo_list.
 * Caller holds d->lock (see dma_trm_flush).
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* raw (PHY) packets get the OHCI-specific PHY tcode header */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
			/* first quadlet: speed code in the upper half, low
			 * 16 header bits in the lower half */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			/* byte-swap the header on big-endian hosts */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		if (packet->data_size) { /* block transmit */
			/* immediate header: 0x8 bytes for stream packets,
			 * 0x10 for normal async headers */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 */
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
					"%s: packet data addr: %p size %Zd bytes "
					"cross page boundary", __FUNCTION__,
					packet->data, packet->data_size);

			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* chain this program after the previous one (Z=3:
			 * two descriptors plus immediate data) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    (packet->header_size + 4));
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    packet->header_size);

			/* chain with Z=2 (single descriptor + immediate) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
				&(d->prg_cpu[idx]->begin.branchAddress);
	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |

		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                 begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                 end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {

		/* For the first packet only */
		/* Z value for cmdPtr: 3 descriptors for block transfers,
		 * 2 for quadlet-only transfers */
		z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);

	/* Nothing must have been done, either no free_prgs or no packets */

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");

		/* set the run bit */
		reg_write(ohci, d->ctrlSet, 0x8000);

		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
/* Transmission of an async or iso packet */
/* hpsb_host driver hook: rejects oversized packets, routes the packet
 * to the AT request, AT response, or legacy IT context (allocating the
 * latter lazily on first use -- not possible from interrupt context),
 * queues it on the context's pending list and kicks dma_trm_flush(). */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;

	if (packet->data_size > ohci->max_packet_size) {
		      "Transmit packet size %Zd is too big",

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				      "legacy IT context cannot be initialized during interrupt");

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				      "error initializing legacy IT context");

			initialize_dma_trm_ctx(&ohci->it_legacy_context);

		d = &ohci->it_legacy_context;
	/* tcode bit 1 set (and not iso) marks a response packet */
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);
/* hpsb_host driver hook dispatching device-control commands: the
 * various bus-reset flavours (short via PHY reg 5/ISBR, long via PHY
 * reg 1/IBR, with or without root hold-off RHB), cycle counter access,
 * cycle master control, request cancellation, and legacy iso channel
 * listen/unlisten bookkeeping. */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
	struct ti_ohci *ohci = host->hostdata;
		/* short bus reset: set ISBR in PHY register 5 */
		phy_reg = get_phy_reg(ohci, 5);
		set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
		/* long bus reset: set IBR in PHY register 1 */
		phy_reg = get_phy_reg(ohci, 1);
		set_phy_reg(ohci, 1, phy_reg); /* set IBR */
	case SHORT_RESET_NO_FORCE_ROOT:
		phy_reg = get_phy_reg(ohci, 1);
		/* 0x80 = RHB (root hold-off bit) in PHY register 1 */
		if (phy_reg & 0x80) {
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB */

		phy_reg = get_phy_reg(ohci, 5);
		set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
	case LONG_RESET_NO_FORCE_ROOT:
		phy_reg = get_phy_reg(ohci, 1);
		set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
	case SHORT_RESET_FORCE_ROOT:
		phy_reg = get_phy_reg(ohci, 1);
		if (!(phy_reg & 0x80)) {
			set_phy_reg(ohci, 1, phy_reg); /* set RHB */

		phy_reg = get_phy_reg(ohci, 5);
		set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
	case LONG_RESET_FORCE_ROOT:
		phy_reg = get_phy_reg(ohci, 1);
		set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
	case ACT_CYCLE_MASTER:
		/* check if we are root and other nodes are present */
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
		if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
			 * enable cycleTimer, cycleMaster
			DBGMSG("Cycle master enabled");
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster);
		/* disable cycleTimer, cycleMaster, cycleSource */
		reg_write(ohci, OHCI1394_LinkControlClear,
			  OHCI1394_LinkControl_CycleTimerEnable |
			  OHCI1394_LinkControl_CycleMaster |
			  OHCI1394_LinkControl_CycleSource);
	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
	case ISO_LISTEN_CHANNEL:
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			"%s: IS0 listen channel %d is out of range",

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			"%s: IS0 listen channel %d is already used",
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		/* remember whether the legacy IR context was already live
		 * before we claim this channel */
		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

		if (printk_ratelimit())
			DBGMSG("IR legacy activated");

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
	case ISO_UNLISTEN_CHANNEL:

		if (arg<0 || arg>63) {
			"%s: IS0 unlisten channel %d is out of range",

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			"%s: IS0 unlisten channel %d is not used",
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		/* last channel gone: tear down the legacy receive context */
		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");

		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1162 /***********************************
1163 * rawiso ISO reception *
1164 ***********************************/
1167 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1168 buffer is split into "blocks" (regions described by one DMA
1169 descriptor). Each block must be one page or less in size, and
1170 must not cross a page boundary.
1172 There is one little wrinkle with buffer-fill mode: a packet that
1173 starts in the final block may wrap around into the first block. But
1174 the user API expects all packets to be contiguous. Our solution is
1175 to keep the very last page of the DMA buffer in reserve - if a
1176 packet spans the gap, we copy its tail into this page.
/* Per-iso-receive-context state (see the mode description above). */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;

	/* DMA strategy: packed stream (buffer-fill) vs one packet per block */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */

	/* (buffer-fill only) block that the reader will release next */

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */

	/* (buffer-fill only) buffer offset at which the next packet will appear */

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
1221 static void ohci_iso_recv_task(unsigned long data);
1222 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1223 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1224 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1225 static void ohci_iso_recv_program(struct hpsb_iso *iso);
/* Allocate and initialize an IR DMA context for the given hpsb_iso:
 * choose buffer-fill vs. packet-per-buffer mode, compute the block
 * geometry, allocate the descriptor program, register the tasklet, and
 * write the DMA program.  Returns 0 on success, negative on failure
 * (error paths unwind through ohci_iso_recv_shutdown). */
1227 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1229 struct ti_ohci *ohci = iso->host->hostdata;
1230 struct ohci_iso_recv *recv;
1234 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
/* NOTE(review): the excerpt elides the kmalloc NULL check and the
 * recv->ohci assignment (recv->ohci is used below) - confirm against
 * the full source. */
1238 iso->hostdata = recv;
1240 recv->task_active = 0;
1241 dma_prog_region_init(&recv->prog);
1244 /* use buffer-fill mode, unless irq_interval is 1
1245 (note: multichannel requires buffer-fill) */
1247 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1248 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1249 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1251 recv->dma_mode = BUFFER_FILL_MODE;
1254 /* set nblocks, buf_stride, block_irq_interval */
1256 if (recv->dma_mode == BUFFER_FILL_MODE) {
1257 recv->buf_stride = PAGE_SIZE;
1259 /* one block per page of data in the DMA buffer, minus the final guard page */
1260 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1261 if (recv->nblocks < 3) {
1262 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1266 /* iso->irq_interval is in packets - translate that to blocks */
1267 if (iso->irq_interval == 1)
1268 recv->block_irq_interval = 1;
1270 recv->block_irq_interval = iso->irq_interval *
1271 ((recv->nblocks+1)/iso->buf_packets);
/* clamp the interrupt interval to at most nblocks/4, at least 1 */
1272 if (recv->block_irq_interval*4 > recv->nblocks)
1273 recv->block_irq_interval = recv->nblocks/4;
1274 if (recv->block_irq_interval < 1)
1275 recv->block_irq_interval = 1;
1278 int max_packet_size;
/* packet-per-buffer: one block (slot) per packet */
1280 recv->nblocks = iso->buf_packets;
1281 recv->block_irq_interval = iso->irq_interval;
1282 if (recv->block_irq_interval * 4 > iso->buf_packets)
1283 recv->block_irq_interval = iso->buf_packets / 4;
1284 if (recv->block_irq_interval < 1)
1285 recv->block_irq_interval = 1;
1287 /* choose a buffer stride */
1288 /* must be a power of 2, and <= PAGE_SIZE */
1290 max_packet_size = iso->buf_size / iso->buf_packets;
/* smallest power of two >= max_packet_size, starting from 8 */
1292 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1293 recv->buf_stride *= 2);
1295 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1296 recv->buf_stride > PAGE_SIZE) {
1297 /* this shouldn't happen, but anyway... */
1298 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
/* reset the ring-buffer bookkeeping */
1303 recv->block_reader = 0;
1304 recv->released_bytes = 0;
1305 recv->block_dma = 0;
1306 recv->dma_offset = 0;
1308 /* size of DMA program = one descriptor per block */
1309 if (dma_prog_region_alloc(&recv->prog,
1310 sizeof(struct dma_cmd) * recv->nblocks,
1314 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1316 ohci1394_init_iso_tasklet(&recv->task,
1317 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1319 ohci_iso_recv_task, (unsigned long) iso);
1321 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1326 recv->task_active = 1;
1328 /* recv context registers are spaced 32 bytes apart */
1329 ctx = recv->task.context;
1330 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1331 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1332 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1333 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1335 if (iso->channel == -1) {
1336 /* clear multi-channel selection mask */
1337 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1338 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1341 /* write the DMA program */
1342 ohci_iso_recv_program(iso);
1344 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1345 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1346 recv->dma_mode == BUFFER_FILL_MODE ?
1347 "buffer-fill" : "packet-per-buffer",
1348 iso->buf_size/PAGE_SIZE, iso->buf_size,
1349 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
/* error path: tear down whatever was set up */
1354 ohci_iso_recv_shutdown(iso);
/* Halt the IR DMA context: mask its interrupt bit, then stop the
 * hardware context via its ContextControlClear register. */
1358 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1360 struct ohci_iso_recv *recv = iso->hostdata;
1362 /* disable interrupts */
1363 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
/* halt the DMA context itself */
1366 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
/* Tear down the IR context: stop DMA and unregister the tasklet if
 * active, free the descriptor program, and detach from the hpsb_iso.
 * Safe to call from the init error path (checks task_active).
 * NOTE(review): the excerpt elides the kfree() of recv before
 * iso->hostdata is cleared - confirm against the full source. */
1369 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1371 struct ohci_iso_recv *recv = iso->hostdata;
1373 if (recv->task_active) {
1374 ohci_iso_recv_stop(iso);
1375 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1376 recv->task_active = 0;
1379 dma_prog_region_free(&recv->prog);
1381 iso->hostdata = NULL;
1384 /* set up a "gapped" ring buffer DMA program */
/* Build one INPUT_MORE/INPUT_LAST descriptor per block, each covering
 * buf_stride bytes of the data buffer, chained into a ring by their
 * branchAddress fields (descriptors are little-endian per OHCI). */
1385 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1387 struct ohci_iso_recv *recv = iso->hostdata;
1390 /* address of 'branch' field in previous DMA descriptor */
1391 u32 *prev_branch = NULL;
1393 for (blk = 0; blk < recv->nblocks; blk++) {
1396 /* the DMA descriptor */
1397 struct dma_cmd *cmd = &recv->block[blk];
1399 /* offset of the DMA descriptor relative to the DMA prog buffer */
1400 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1402 /* offset of this packet's data within the DMA buffer */
1403 unsigned long buf_offset = blk * recv->buf_stride;
1405 if (recv->dma_mode == BUFFER_FILL_MODE) {
1406 control = 2 << 28; /* INPUT_MORE */
1408 control = 3 << 28; /* INPUT_LAST */
1411 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1413 /* interrupt on last block, and at intervals */
1414 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1415 control |= 3 << 20; /* want interrupt */
1418 control |= 3 << 18; /* enable branch to address */
1419 control |= recv->buf_stride;
1421 cmd->control = cpu_to_le32(control);
1422 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1423 cmd->branchAddress = 0; /* filled in on next loop */
/* resCount starts at the full block size (no bytes written yet) */
1424 cmd->status = cpu_to_le32(recv->buf_stride);
1426 /* link the previous descriptor to this one */
1428 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1431 prev_branch = &cmd->branchAddress;
1434 /* the final descriptor's branch address and Z should be left at 0 */
1437 /* listen or unlisten to a specific channel (multi-channel mode only) */
/* Sets (listen) or clears (unlisten) the bit for 'channel' in the
 * IRMultiChanMask Hi/Lo register pair; Lo covers channels 0-31,
 * Hi covers 32-63. */
1438 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1440 struct ohci_iso_recv *recv = iso->hostdata;
1444 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1447 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1451 reg_write(recv->ohci, reg, (1 << i));
1453 /* issue a dummy read to force all PCI writes to be posted immediately */
1455 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
/* Program the full 64-bit multi-channel listen mask: for each channel
 * bit set in 'mask' write the Set register, otherwise the Clear
 * register (Lo = channels 0-31, Hi = 32-63). */
1458 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1460 struct ohci_iso_recv *recv = iso->hostdata;
1463 for (i = 0; i < 64; i++) {
1464 if (mask & (1ULL << i)) {
1466 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1468 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1471 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1473 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1477 /* issue a dummy read to force all PCI writes to be posted immediately */
1479 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
/* Start IR DMA: program ContextControl (buffer-fill vs. packet mode),
 * ContextMatch (tags, channel, optional start cycle and sync field),
 * point CommandPtr at the first descriptor, unmask the interrupt and
 * set the RUN bit.  Returns 0 on success, or an error if the context
 * fails to go active. */
1482 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1484 struct ohci_iso_recv *recv = iso->hostdata;
1485 struct ti_ohci *ohci = recv->ohci;
1486 u32 command, contextMatch;
/* clear the whole control register before reprogramming */
1488 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1491 /* always keep ISO headers */
1492 command = (1 << 30);
1494 if (recv->dma_mode == BUFFER_FILL_MODE)
1495 command |= (1 << 31);
1497 reg_write(recv->ohci, recv->ContextControlSet, command);
1499 /* match on specified tags */
1500 contextMatch = tag_mask << 28;
1502 if (iso->channel == -1) {
1503 /* enable multichannel reception */
1504 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1506 /* listen on channel */
1507 contextMatch |= iso->channel;
/* optional start on a specific cycle (cycle >= 0) */
1513 /* enable cycleMatch */
1514 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1516 /* set starting cycle */
1519 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1520 just snarf them from the current time */
1521 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1523 /* advance one second to give some extra time for DMA to start */
1526 cycle |= (seconds & 3) << 13;
1528 contextMatch |= cycle << 12;
1532 /* set sync flag on first DMA descriptor */
1533 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1534 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1536 /* match sync field */
1537 contextMatch |= (sync&0xf)<<8;
1540 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1542 /* address of first descriptor block */
1543 command = dma_prog_region_offset_to_bus(&recv->prog,
1544 recv->block_dma * sizeof(struct dma_cmd));
1545 command |= 1; /* Z=1 */
1547 reg_write(recv->ohci, recv->CommandPtr, command);
1549 /* enable interrupts */
1550 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
/* set the RUN bit */
1555 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1557 /* issue a dummy read of the cycle timer register to force
1558 all PCI writes to be posted immediately */
1560 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
/* verify the context actually went active */
1563 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1565 "Error starting IR DMA (ContextControl 0x%08x)\n",
1566 reg_read(recv->ohci, recv->ContextControlSet));
/* Return a consumed block to the DMA ring: make its descriptor the new
 * end of the chain (no branch, interrupt enabled, resCount reset) and
 * re-link the previous descriptor to it, then nudge the context's WAKE
 * bit in case the DMA engine had stalled at the old end of chain.
 * NOTE(review): the range check uses 'block > recv->nblocks'; valid
 * indices are 0..nblocks-1, so '>=' looks intended - and the check runs
 * after next/prev pointers are formed from 'block'. Confirm against the
 * full source (the elided line presumably derives next_i from block). */
1573 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1575 /* re-use the DMA descriptor for the block */
1576 /* by linking the previous descriptor to it */
1579 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1581 struct dma_cmd *next = &recv->block[next_i];
1582 struct dma_cmd *prev = &recv->block[prev_i];
1584 /* ignore out-of-range requests */
1585 if ((block < 0) || (block > recv->nblocks))
1588 /* 'next' becomes the new end of the DMA chain,
1589 so disable branch and enable interrupt */
1590 next->branchAddress = 0;
1591 next->control |= cpu_to_le32(3 << 20);
1592 next->status = cpu_to_le32(recv->buf_stride);
1594 /* link prev to next */
1595 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1596 sizeof(struct dma_cmd) * next_i)
1599 /* disable interrupt on previous DMA descriptor, except at intervals */
1600 if ((prev_i % recv->block_irq_interval) == 0) {
1601 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1603 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1607 /* wake up DMA in case it fell asleep */
1608 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
/* Buffer-fill mode release: account the released bytes and hand whole
 * blocks back to the DMA ring once enough bytes have accumulated. */
1611 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1612 struct hpsb_iso_packet_info *info)
1614 /* release the memory where the packet was */
1615 recv->released_bytes += info->total_len;
1617 /* have we released enough memory for one block? */
1618 while (recv->released_bytes > recv->buf_stride) {
1619 ohci_iso_recv_release_block(recv, recv->block_reader);
1620 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1621 recv->released_bytes -= recv->buf_stride;
/* Dispatch a packet release to the mode-specific handler: byte-counted
 * in buffer-fill mode, one-block-per-packet otherwise (the packet's
 * index in iso->infos is its block index). */
1625 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1627 struct ohci_iso_recv *recv = iso->hostdata;
1628 if (recv->dma_mode == BUFFER_FILL_MODE) {
1629 ohci_iso_recv_bufferfill_release(recv, info);
1631 ohci_iso_recv_release_block(recv, info - iso->infos);
1635 /* parse all packets from blocks that have been fully received */
/* Walks the data buffer from recv->dma_offset up to (but not into) the
 * block DMA is still writing, extracting each packet's header
 * (len/tag/channel/sy), payload offset and trailing timestamp, and
 * reporting it via hpsb_iso_packet_received().  Packets that wrap past
 * the last block are made contiguous by copying their tail into the
 * reserved guard page at the end of the buffer. */
1636 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1640 struct ti_ohci *ohci = recv->ohci;
1643 /* we expect the next parsable packet to begin at recv->dma_offset */
1644 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1646 unsigned int offset;
1647 unsigned short len, cycle, total_len;
1648 unsigned char channel, tag, sy;
1650 unsigned char *p = iso->data_buf.kvirt;
1652 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1654 /* don't loop indefinitely */
1655 if (runaway++ > 100000) {
1656 atomic_inc(&iso->overflows);
1658 "IR DMA error - Runaway during buffer parsing!\n");
1662 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1663 if (this_block == recv->block_dma)
1668 /* parse data length, tag, channel, and sy */
1670 /* note: we keep our own local copies of 'len' and 'offset'
1671 so the user can't mess with them by poking in the mmap area */
/* 'len' is little-endian in the 4-byte OHCI iso header */
1673 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1677 "IR DMA error - bogus 'len' value %u\n", len);
1680 channel = p[recv->dma_offset+1] & 0x3F;
1681 tag = p[recv->dma_offset+1] >> 6;
1682 sy = p[recv->dma_offset+0] & 0xF;
1684 /* advance to data payload */
1685 recv->dma_offset += 4;
1687 /* check for wrap-around */
1688 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1689 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1692 /* dma_offset now points to the first byte of the data payload */
1693 offset = recv->dma_offset;
1695 /* advance to xferStatus/timeStamp */
1696 recv->dma_offset += len;
1698 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1699 /* payload is padded to 4 bytes */
1701 recv->dma_offset += 4 - (len%4);
1702 total_len += 4 - (len%4);
1705 /* check for wrap-around */
1706 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1707 /* uh oh, the packet data wraps from the last
1708 to the first DMA block - make the packet
1709 contiguous by copying its "tail" into the
1712 int guard_off = recv->buf_stride*recv->nblocks;
1713 int tail_len = len - (guard_off - offset);
1715 if (tail_len > 0 && tail_len < recv->buf_stride) {
1716 memcpy(iso->data_buf.kvirt + guard_off,
1717 iso->data_buf.kvirt,
1721 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1724 /* parse timestamp */
1725 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1728 /* advance to next packet */
1729 recv->dma_offset += 4;
1731 /* check for wrap-around */
1732 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1733 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1736 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
/* Buffer-fill tasklet body: consume finished DMA blocks in order,
 * checking each descriptor's xferStatus/resCount, syncing the data for
 * the CPU, resetting the descriptor, and detecting overflow when
 * block_dma catches up with block_reader; finally parse any complete
 * packets that arrived. */
1743 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1746 struct ti_ohci *ohci = recv->ohci;
1748 /* loop over all blocks */
1749 for (loop = 0; loop < recv->nblocks; loop++) {
1751 /* check block_dma to see if it's done */
1752 struct dma_cmd *im = &recv->block[recv->block_dma];
1754 /* check the DMA descriptor for new writes to xferStatus */
1755 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1757 /* rescount is the number of bytes *remaining to be written* in the block */
1758 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1760 unsigned char event = xferstatus & 0x1F;
1763 /* nothing has happened to this block yet */
/* event 0x11 == evt_ack_complete; anything else is an error */
1767 if (event != 0x11) {
1768 atomic_inc(&iso->overflows);
1770 "IR DMA error - OHCI error code 0x%02x\n", event);
1773 if (rescount != 0) {
1774 /* the card is still writing to this block;
1775 we can't touch it until it's done */
1779 /* OK, the block is finished... */
1781 /* sync our view of the block */
1782 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1784 /* reset the DMA descriptor */
/* NOTE(review): raw store (no cpu_to_le32) - descriptor fields are
 * little-endian elsewhere in this file; confirm against full source. */
1785 im->status = recv->buf_stride;
1787 /* advance block_dma */
1788 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1790 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1791 atomic_inc(&iso->overflows);
1792 DBGMSG("ISO reception overflow - "
1793 "ran out of DMA blocks");
1797 /* parse any packets that have arrived */
1798 ohci_iso_recv_bufferfill_parse(iso, recv);
/* Packet-per-buffer tasklet body: for each completed descriptor slot,
 * compute the packet length from resCount, parse the in-band iso
 * header (cycle/channel/tag/sy), report the packet, and reset the
 * descriptor; stops at the first slot DMA has not finished. */
1801 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1805 struct ti_ohci *ohci = recv->ohci;
1807 /* loop over the entire buffer */
1808 for (count = 0; count < recv->nblocks; count++) {
1811 /* pointer to the DMA descriptor */
1812 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1814 /* check the DMA descriptor for new writes to xferStatus */
1815 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1816 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1818 unsigned char event = xferstatus & 0x1F;
1821 /* this packet hasn't come in yet; we are done for now */
1825 if (event == 0x11) {
1826 /* packet received successfully! */
1828 /* rescount is the number of bytes *remaining* in the packet buffer,
1829 after the packet was written */
1830 packet_len = recv->buf_stride - rescount;
1832 } else if (event == 0x02) {
1833 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1835 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1838 /* sync our view of the buffer */
1839 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1841 /* record the per-packet info */
1843 /* iso header is 8 bytes ahead of the data payload */
1846 unsigned int offset;
1847 unsigned short cycle;
1848 unsigned char channel, tag, sy;
1850 offset = iso->pkt_dma * recv->buf_stride;
1851 hdr = iso->data_buf.kvirt + offset;
1853 /* skip iso header */
/* timestamp is in the first two header bytes, 13-bit cycle count */
1857 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1858 channel = hdr[5] & 0x3F;
1862 hpsb_iso_packet_received(iso, offset, packet_len,
1863 recv->buf_stride, cycle, channel, tag, sy);
1866 /* reset the DMA descriptor */
1867 il->status = recv->buf_stride;
/* keep block_dma in step with the subsystem's pkt_dma cursor */
1870 recv->block_dma = iso->pkt_dma;
/* Tasklet entry point for IR DMA: dispatch to the handler matching the
 * context's DMA mode.  'data' is the hpsb_iso pointer cast to ulong. */
1878 static void ohci_iso_recv_task(unsigned long data)
1880 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1881 struct ohci_iso_recv *recv = iso->hostdata;
1883 if (recv->dma_mode == BUFFER_FILL_MODE)
1884 ohci_iso_recv_bufferfill_task(iso, recv);
1886 ohci_iso_recv_packetperbuf_task(iso, recv);
1889 /***********************************
1890 * rawiso ISO transmission *
1891 ***********************************/
/* Per-context state for rawiso transmission on one OHCI IT DMA context.
 * NOTE(review): listing is elided - declarations for task_active,
 * CommandPtr and any other fields referenced below are missing from
 * this excerpt; confirm against the full source. */
1893 struct ohci_iso_xmit {
1894 struct ti_ohci *ohci;
1895 struct dma_prog_region prog;
1896 struct ohci1394_iso_tasklet task;
1899 u32 ContextControlSet;
1900 u32 ContextControlClear;
1904 /* transmission DMA program:
1905 one OUTPUT_MORE_IMMEDIATE for the IT header
1906 one OUTPUT_LAST for the buffer data */
/* One per-packet descriptor pair; the iso header bytes live between the
 * two dma_cmd structs (iso_hdr field is elided from this excerpt). */
1908 struct iso_xmit_cmd {
1909 struct dma_cmd output_more_immediate;
1912 struct dma_cmd output_last;
1915 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1916 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1917 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1918 static void ohci_iso_xmit_task(unsigned long data);
/* Allocate and initialize an IT DMA context: one iso_xmit_cmd per
 * packet slot, tasklet registration, and per-context register offsets.
 * Returns 0 on success; error paths unwind via ohci_iso_xmit_shutdown. */
1920 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1922 struct ohci_iso_xmit *xmit;
1923 unsigned int prog_size;
1927 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
/* NOTE(review): kmalloc NULL check elided in this excerpt */
1931 iso->hostdata = xmit;
1932 xmit->ohci = iso->host->hostdata;
1933 xmit->task_active = 0;
1935 dma_prog_region_init(&xmit->prog);
1937 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1939 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1942 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1943 ohci_iso_xmit_task, (unsigned long) iso);
1945 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1950 xmit->task_active = 1;
1952 /* xmit context registers are spaced 16 bytes apart */
1953 ctx = xmit->task.context;
1954 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1955 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1956 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
/* error path: tear down whatever was set up */
1961 ohci_iso_xmit_shutdown(iso);
/* Halt the IT DMA context: mask its interrupt, then stop the context.
 * A nonzero return from ohci1394_stop_context means the context would
 * not stop - typically the known lock-up from over-committing
 * isochronous bandwidth. */
1965 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1967 struct ohci_iso_xmit *xmit = iso->hostdata;
1968 struct ti_ohci *ohci = xmit->ohci;
1970 /* disable interrupts */
1971 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1974 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1975 /* XXX the DMA context will lock up if you try to send too much data! */
1977 "you probably exceeded the OHCI card's bandwidth limit - "
1978 "reload the module and reduce xmit bandwidth");
/* Tear down the IT context: stop DMA and unregister the tasklet if
 * active, free the descriptor program, detach from the hpsb_iso.
 * NOTE(review): the kfree() of xmit is elided in this excerpt -
 * confirm against the full source. */
1982 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1984 struct ohci_iso_xmit *xmit = iso->hostdata;
1986 if (xmit->task_active) {
1987 ohci_iso_xmit_stop(iso);
1988 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1989 xmit->task_active = 0;
1992 dma_prog_region_free(&xmit->prog);
1994 iso->hostdata = NULL;
/* Tasklet entry point for IT DMA: walk the descriptor ring starting at
 * pkt_dma, report each completed packet (with its transmit timestamp)
 * to the subsystem via hpsb_iso_packet_sent, and reset the descriptor;
 * stops at the first packet not yet sent. */
1997 static void ohci_iso_xmit_task(unsigned long data)
1999 struct hpsb_iso *iso = (struct hpsb_iso*) data;
2000 struct ohci_iso_xmit *xmit = iso->hostdata;
2001 struct ti_ohci *ohci = xmit->ohci;
2005 /* check the whole buffer if necessary, starting at pkt_dma */
2006 for (count = 0; count < iso->buf_packets; count++) {
2009 /* DMA descriptor */
2010 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2012 /* check for new writes to xferStatus */
2013 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2014 u8 event = xferstatus & 0x1F;
2017 /* packet hasn't been sent yet; we are done for now */
/* event != 0x11 (evt_ack_complete) is a transmit error */
2023 "IT DMA error - OHCI error code 0x%02x\n", event);
2025 /* at least one packet went out, so wake up the writer */
/* low 13 bits of timeStamp = cycle on which the packet was sent */
2029 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2031 /* tell the subsystem the packet has gone out */
2032 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2034 /* reset the DMA descriptor for next time */
2035 cmd->output_last.status = 0;
/* Queue one packet for transmission: build its OUTPUT_MORE_IMMEDIATE
 * (embedded iso header) + OUTPUT_LAST descriptor pair at the ring slot
 * iso->first_packet, chain the previous descriptor to it, adjust the
 * previous descriptor's interrupt flag per irq_interval, and wake the
 * DMA context.  Rejects packets that cross a page boundary (no
 * OUTPUT_MORE support). */
2042 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2044 struct ohci_iso_xmit *xmit = iso->hostdata;
2045 struct ti_ohci *ohci = xmit->ohci;
2048 struct iso_xmit_cmd *next, *prev;
2050 unsigned int offset;
2052 unsigned char tag, sy;
2054 /* check that the packet doesn't cross a page boundary
2055 (we could allow this if we added OUTPUT_MORE descriptor support) */
2056 if (cross_bound(info->offset, info->len)) {
2058 "rawiso xmit: packet %u crosses a page boundary",
2063 offset = info->offset;
/* NOTE(review): the elided lines presumably load len/tag/sy from info */
2068 /* sync up the card's view of the buffer */
2069 dma_region_sync_for_device(&iso->data_buf, offset, len);
2071 /* append first_packet to the DMA chain */
2072 /* by linking the previous descriptor to it */
2073 /* (next will become the new end of the DMA chain) */
2075 next_i = iso->first_packet;
2076 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2078 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2079 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2081 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2082 memset(next, 0, sizeof(struct iso_xmit_cmd));
2083 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2085 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2087 /* tcode = 0xA, and sy */
2088 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2090 /* tag and channel number */
2091 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2093 /* transmission speed */
2094 next->iso_hdr[2] = iso->speed & 0x7;
/* data_length, little-endian */
2097 next->iso_hdr[6] = len & 0xFF;
2098 next->iso_hdr[7] = len >> 8;
2100 /* set up the OUTPUT_LAST */
2101 next->output_last.control = cpu_to_le32(1 << 28);
2102 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2103 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2104 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2105 next->output_last.control |= cpu_to_le32(len);
2107 /* payload bus address */
2108 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2110 /* leave branchAddress at zero for now */
2112 /* re-write the previous DMA descriptor to chain to this one */
2114 /* set prev branch address to point to next (Z=3) */
2115 prev->output_last.branchAddress = cpu_to_le32(
2116 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2118 /* disable interrupt, unless required by the IRQ interval */
2119 if (prev_i % iso->irq_interval) {
2120 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2122 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2127 /* wake DMA in case it is sleeping */
2128 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2130 /* issue a dummy read of the cycle timer to force all PCI
2131 writes to be posted immediately */
2133 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
/* Start IT DMA: point CommandPtr at the descriptor for pkt_dma (Z=3),
 * optionally arm cycleMatch for a specific start cycle, unmask the
 * interrupt, set RUN, and verify the context went active.  Returns 0
 * on success, an error if the RUN bit did not stick. */
2138 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2140 struct ohci_iso_xmit *xmit = iso->hostdata;
2141 struct ti_ohci *ohci = xmit->ohci;
2143 /* clear out the control register */
2144 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2147 /* address and length of first descriptor block (Z=3) */
2148 reg_write(xmit->ohci, xmit->CommandPtr,
2149 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
/* optional start on a specific cycle (cycle >= 0) */
2153 u32 start = cycle & 0x1FFF;
2155 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2156 just snarf them from the current time */
2157 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2159 /* advance one second to give some extra time for DMA to start */
2162 start |= (seconds & 3) << 13;
2164 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2167 /* enable interrupts */
2168 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
/* set the RUN bit */
2171 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2174 /* wait 100 usec to give the card time to go active */
2177 /* check the RUN bit */
2178 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2179 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2180 reg_read(xmit->ohci, xmit->ContextControlSet));
/* hpsb_host_driver isoctl hook: dispatch rawiso control commands to the
 * xmit/recv implementations above.  'arg' is command-specific (int
 * array for RECV_START, pointer to packet info for QUEUE/RELEASE,
 * channel number or mask pointer for the channel commands).
 * NOTE(review): most case labels are elided from this excerpt. */
2187 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2192 return ohci_iso_xmit_init(iso);
2194 return ohci_iso_xmit_start(iso, arg);
2196 ohci_iso_xmit_stop(iso);
2199 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2201 ohci_iso_xmit_shutdown(iso);
2205 return ohci_iso_recv_init(iso);
2207 int *args = (int*) arg;
2208 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2211 ohci_iso_recv_stop(iso);
2214 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2217 ohci_iso_recv_task((unsigned long) iso);
2220 ohci_iso_recv_shutdown(iso);
2222 case RECV_LISTEN_CHANNEL:
2223 ohci_iso_recv_change_channel(iso, arg, 1);
2225 case RECV_UNLISTEN_CHANNEL:
2226 ohci_iso_recv_change_channel(iso, arg, 0);
2228 case RECV_SET_CHANNEL_MASK:
2229 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2233 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2240 /***************************************
2241 * IEEE-1394 functionality section END *
2242 ***************************************/
2245 /********************************************************
2246 * Global stuff (interrupt handler, init/shutdown code) *
2247 ********************************************************/
/* Abort an AT (async transmit) context: stop the hardware context,
 * splice all fifo/pending packets onto a local list under the context
 * lock, reset the ring state, then complete each aborted packet with
 * ACKX_ABORTED outside the lock (hpsb_packet_sent may sleep/callback). */
2249 static void dma_trm_reset(struct dma_trm_ctx *d)
2251 unsigned long flags;
2252 LIST_HEAD(packet_list);
2253 struct ti_ohci *ohci = d->ohci;
2254 struct hpsb_packet *packet, *ptmp;
2256 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2258 /* Lock the context, reset it and release it. Move the packets
2259 * that were pending in the context to packet_list and free
2260 * them after releasing the lock. */
2262 spin_lock_irqsave(&d->lock, flags);
2264 list_splice(&d->fifo_list, &packet_list);
2265 list_splice(&d->pending_list, &packet_list);
2266 INIT_LIST_HEAD(&d->fifo_list);
2267 INIT_LIST_HEAD(&d->pending_list);
2269 d->branchAddrPtr = NULL;
2270 d->sent_ind = d->prg_ind;
2271 d->free_prgs = d->num_desc;
2273 spin_unlock_irqrestore(&d->lock, flags);
2275 if (list_empty(&packet_list))
2278 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2280 /* Now process subsystem callbacks for the packets from this
2282 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2283 list_del_init(&packet->driver_list);
2284 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
/* Schedule the iso tasklets whose context bit is set in the pending
 * tx_event/rx_event interrupt masks, under the tasklet-list lock. */
2288 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2292 struct ohci1394_iso_tasklet *t;
2294 unsigned long flags;
2296 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2298 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2299 mask = 1 << t->context;
/* transmit tasklets match tx_event; all others match rx_event */
2301 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2302 tasklet_schedule(&t->tasklet);
2303 else if (rx_event & mask)
2304 tasklet_schedule(&t->tasklet);
2307 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2310 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2311 struct pt_regs *regs_are_unused)
2313 quadlet_t event, node_id;
2314 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2315 struct hpsb_host *host = ohci->host;
2316 int phyid = -1, isroot = 0;
2317 unsigned long flags;
2319 /* Read and clear the interrupt event register. Don't clear
2320 * the busReset event, though. This is done when we get the
2321 * selfIDComplete interrupt. */
2322 spin_lock_irqsave(&ohci->event_lock, flags);
2323 event = reg_read(ohci, OHCI1394_IntEventClear);
2324 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2325 spin_unlock_irqrestore(&ohci->event_lock, flags);
2330 /* If event is ~(u32)0 cardbus card was ejected. In this case
2331 * we just return, and clean up in the ohci1394_pci_remove
2333 if (event == ~(u32) 0) {
2334 DBGMSG("Device removed.");
2338 DBGMSG("IntEvent: %08x", event);
2340 if (event & OHCI1394_unrecoverableError) {
2342 PRINT(KERN_ERR, "Unrecoverable error!");
2344 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2345 PRINT(KERN_ERR, "Async Req Tx Context died: "
2346 "ctrl[%08x] cmdptr[%08x]",
2347 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2348 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2350 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2351 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2352 "ctrl[%08x] cmdptr[%08x]",
2353 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2354 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2356 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2357 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2358 "ctrl[%08x] cmdptr[%08x]",
2359 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2360 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2362 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2363 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2364 "ctrl[%08x] cmdptr[%08x]",
2365 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2366 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2368 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2369 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2370 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2371 "ctrl[%08x] cmdptr[%08x]", ctx,
2372 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2373 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2376 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2377 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2378 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2379 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2380 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2381 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2382 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2385 event &= ~OHCI1394_unrecoverableError;
2387 if (event & OHCI1394_postedWriteErr) {
2388 PRINT(KERN_ERR, "physical posted write error");
2389 /* no recovery strategy yet, had to involve protocol drivers */
2391 if (event & OHCI1394_cycleTooLong) {
2392 if(printk_ratelimit())
2393 PRINT(KERN_WARNING, "isochronous cycle too long");
2395 DBGMSG("OHCI1394_cycleTooLong");
2396 reg_write(ohci, OHCI1394_LinkControlSet,
2397 OHCI1394_LinkControl_CycleMaster);
2398 event &= ~OHCI1394_cycleTooLong;
2400 if (event & OHCI1394_cycleInconsistent) {
2401 /* We subscribe to the cycleInconsistent event only to
2402 * clear the corresponding event bit... otherwise,
2403 * isochronous cycleMatch DMA won't work. */
2404 DBGMSG("OHCI1394_cycleInconsistent");
2405 event &= ~OHCI1394_cycleInconsistent;
2407 if (event & OHCI1394_busReset) {
2408 /* The busReset event bit can't be cleared during the
2409 * selfID phase, so we disable busReset interrupts, to
2410 * avoid burying the cpu in interrupt requests. */
2411 spin_lock_irqsave(&ohci->event_lock, flags);
2412 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2414 if (ohci->check_busreset) {
2419 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2420 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2422 spin_unlock_irqrestore(&ohci->event_lock, flags);
2424 spin_lock_irqsave(&ohci->event_lock, flags);
2426 /* The loop counter check is to prevent the driver
2427 * from remaining in this state forever. For the
2428 * initial bus reset, the loop continues for ever
2429 * and the system hangs, until some device is plugged-in
2430 * or out manually into a port! The forced reset seems
2431 * to solve this problem. This mainly effects nForce2. */
2432 if (loop_count > 10000) {
2433 ohci_devctl(host, RESET_BUS, LONG_RESET);
2434 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2441 spin_unlock_irqrestore(&ohci->event_lock, flags);
2442 if (!host->in_bus_reset) {
2443 DBGMSG("irq_handler: Bus reset requested");
2445 /* Subsystem call */
2446 hpsb_bus_reset(ohci->host);
2448 event &= ~OHCI1394_busReset;
2450 if (event & OHCI1394_reqTxComplete) {
2451 struct dma_trm_ctx *d = &ohci->at_req_context;
2452 DBGMSG("Got reqTxComplete interrupt "
2453 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2454 if (reg_read(ohci, d->ctrlSet) & 0x800)
2455 ohci1394_stop_context(ohci, d->ctrlClear,
2458 dma_trm_tasklet((unsigned long)d);
2459 //tasklet_schedule(&d->task);
2460 event &= ~OHCI1394_reqTxComplete;
2462 if (event & OHCI1394_respTxComplete) {
2463 struct dma_trm_ctx *d = &ohci->at_resp_context;
2464 DBGMSG("Got respTxComplete interrupt "
2465 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2466 if (reg_read(ohci, d->ctrlSet) & 0x800)
2467 ohci1394_stop_context(ohci, d->ctrlClear,
2470 tasklet_schedule(&d->task);
2471 event &= ~OHCI1394_respTxComplete;
2473 if (event & OHCI1394_RQPkt) {
2474 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2475 DBGMSG("Got RQPkt interrupt status=0x%08X",
2476 reg_read(ohci, d->ctrlSet));
2477 if (reg_read(ohci, d->ctrlSet) & 0x800)
2478 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2480 tasklet_schedule(&d->task);
2481 event &= ~OHCI1394_RQPkt;
2483 if (event & OHCI1394_RSPkt) {
2484 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2485 DBGMSG("Got RSPkt interrupt status=0x%08X",
2486 reg_read(ohci, d->ctrlSet));
2487 if (reg_read(ohci, d->ctrlSet) & 0x800)
2488 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2490 tasklet_schedule(&d->task);
2491 event &= ~OHCI1394_RSPkt;
2493 if (event & OHCI1394_isochRx) {
2496 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2497 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2498 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2499 event &= ~OHCI1394_isochRx;
2501 if (event & OHCI1394_isochTx) {
2504 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2505 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2506 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2507 event &= ~OHCI1394_isochTx;
2509 if (event & OHCI1394_selfIDComplete) {
2510 if (host->in_bus_reset) {
2511 node_id = reg_read(ohci, OHCI1394_NodeID);
2513 if (!(node_id & 0x80000000)) {
2515 "SelfID received, but NodeID invalid "
2516 "(probably new bus reset occurred): %08X",
2518 goto selfid_not_valid;
2521 phyid = node_id & 0x0000003f;
2522 isroot = (node_id & 0x40000000) != 0;
2524 DBGMSG("SelfID interrupt received "
2525 "(phyid %d, %s)", phyid,
2526 (isroot ? "root" : "not root"));
2528 handle_selfid(ohci, host, phyid, isroot);
2530 /* Clear the bus reset event and re-enable the
2531 * busReset interrupt. */
2532 spin_lock_irqsave(&ohci->event_lock, flags);
2533 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2534 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2535 spin_unlock_irqrestore(&ohci->event_lock, flags);
2537 /* Turn on phys dma reception.
2539 * TODO: Enable some sort of filtering management.
2542 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2544 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2548 DBGMSG("PhyReqFilter=%08x%08x",
2549 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2550 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2552 hpsb_selfid_complete(host, phyid, isroot);
2555 "SelfID received outside of bus reset sequence");
2558 event &= ~OHCI1394_selfIDComplete;
2561 /* Make sure we handle everything, just in case we accidentally
2562 * enabled an interrupt that we didn't write a handler for. */
2564 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2570 /* Put the buffer back into the dma context */
2571 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
/* Re-arm receive descriptor idx after software has consumed its data:
 * reset its residual byte count to the full buffer size, clear the low
 * bits of its branchAddress, then set bit 0 (the Z/branch-valid bit) on
 * the PREVIOUS descriptor so the controller can chain into this one. */
2573 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2574 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2576 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
/* NOTE(review): le32_to_cpu() on a constant works only because the swap
 * is its own inverse; cpu_to_le32() would state the intent better. */
2577 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
/* step back to the predecessor in the circular descriptor ring */
2578 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2579 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2581 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2582 * context program descriptors before it sees the wakeup bit set. */
2585 /* wake up the dma context if necessary */
2586 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2588 "Waking dma ctx=%d ... processing is probably too slow",
/* write the wake bit unconditionally; checking first would race */
2592 /* do this always, to avoid race condition */
2593 reg_write(ohci, d->ctrlSet, 0x1000);
/* Byteswap a received quadlet unless the controller already delivers it
 * in host order (noswap is set for the Apple UniNorth quirk, see probe). */
2596 #define cond_le32_to_cpu(data, noswap) \
2597 (noswap ? data : le32_to_cpu(data))
/* Async header size in bytes per transaction code: 0 means a block
 * packet whose payload length must be read from the header instead
 * (see packet_length()); -1 marks a reserved/invalid tcode. */
2599 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2600 -1, 0, -1, 0, -1, -1, 16, -1};
2603 * Determine the length of a packet in the buffer
2604 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2606 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2607 int offset, unsigned char tcode, int noswap)
/* Returns the total packet length in bytes (rounded up to a quadlet
 * multiple), or a value < 4 when tcode is reserved/invalid — callers
 * treat < 4 as an error (see dma_rcv_tasklet). */
2611 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2612 length = TCODE_SIZE[tcode];
/* length == 0: block packet — data_length lives in quadlet 3 of the
 * header, which may straddle into the next ring buffer. */
2614 if (offset + 12 >= d->buf_size) {
/* header quadlet 3 wrapped into the following descriptor's buffer */
2615 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2616 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2618 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2622 } else if (d->type == DMA_CTX_ISO) {
2623 /* Assumption: buffer fill mode with header/trailer */
2624 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
/* pad odd lengths up to the next quadlet boundary */
2627 if (length > 0 && length % 4)
2628 length += 4 - (length % 4);
2633 /* Tasklet that processes dma receive buffers */
2634 static void dma_rcv_tasklet (unsigned long data)
/* Walks the AR/IR buffer-fill ring under d->lock, extracting one packet
 * per iteration into the bounce buffer d->spb and handing it to the
 * ieee1394 core via hpsb_packet_received().  Handles packets that span
 * descriptor boundaries by stitching the pieces together in d->spb.
 * On any malformed packet the context is stopped rather than resynced
 * (no DMA error recovery — see file header). */
2636 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2637 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2638 unsigned int split_left, idx, offset, rescount;
2639 unsigned char tcode;
2640 int length, bytes_left, ack;
2641 unsigned long flags;
2646 spin_lock_irqsave(&d->lock, flags);
2649 offset = d->buf_offset;
2650 buf_ptr = d->buf_cpu[idx] + offset/4;
/* status holds the controller's residual count; bytes written so far
 * into this buffer = buf_size - rescount. */
2652 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2653 bytes_left = d->buf_size - rescount - offset;
2655 while (bytes_left > 0) {
2656 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2658 /* packet_length() will return < 4 for an error */
2659 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2661 if (length < 4) { /* something is wrong */
2662 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2663 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2665 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2666 spin_unlock_irqrestore(&d->lock, flags);
2670 /* The first case is where we have a packet that crosses
2671 * over more than one descriptor. The next case is where
2672 * it's all in the first descriptor. */
2673 if ((offset + length) > d->buf_size) {
2674 DBGMSG("Split packet rcv'd");
2675 if (length > d->split_buf_size) {
2676 ohci1394_stop_context(ohci, d->ctrlClear,
2677 "Split packet size exceeded");
2679 d->buf_offset = offset;
2680 spin_unlock_irqrestore(&d->lock, flags);
/* If the next descriptor's residual equals a full buffer, the
 * controller has not written the packet's tail yet; bail out and
 * finish on a later tasklet run. */
2684 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2686 /* Other part of packet not written yet.
2687 * this should never happen I think
2688 * anyway we'll get it on the next call. */
2690 "Got only half a packet!");
2692 d->buf_offset = offset;
2693 spin_unlock_irqrestore(&d->lock, flags);
/* stitch the packet together in the bounce buffer, recycling each
 * fully-consumed descriptor as we go */
2697 split_left = length;
2698 split_ptr = (char *)d->spb;
2699 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2700 split_left -= d->buf_size-offset;
2701 split_ptr += d->buf_size-offset;
2702 insert_dma_buffer(d, idx);
2703 idx = (idx+1) % d->num_desc;
2704 buf_ptr = d->buf_cpu[idx];
2707 while (split_left >= d->buf_size) {
2708 memcpy(split_ptr,buf_ptr,d->buf_size);
2709 split_ptr += d->buf_size;
2710 split_left -= d->buf_size;
2711 insert_dma_buffer(d, idx);
2712 idx = (idx+1) % d->num_desc;
2713 buf_ptr = d->buf_cpu[idx];
2716 if (split_left > 0) {
2717 memcpy(split_ptr, buf_ptr, split_left);
2718 offset = split_left;
2719 buf_ptr += offset/4;
2722 DBGMSG("Single packet rcv'd");
/* packet fits in the current buffer: copy and advance in place */
2723 memcpy(d->spb, buf_ptr, length);
2725 buf_ptr += length/4;
2726 if (offset==d->buf_size) {
2727 insert_dma_buffer(d, idx);
2728 idx = (idx+1) % d->num_desc;
2729 buf_ptr = d->buf_cpu[idx];
2734 /* We get one phy packet to the async descriptor for each
2735 * bus reset. We always ignore it. */
2736 if (tcode != OHCI1394_TCODE_PHY) {
2737 if (!ohci->no_swap_incoming)
2738 packet_swab(d->spb, tcode);
2739 DBGMSG("Packet received from node"
2740 " %d ack=0x%02X spd=%d tcode=0x%X"
2741 " length=%d ctx=%d tlabel=%d",
2742 (d->spb[1]>>16)&0x3f,
2743 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2744 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2745 tcode, length, d->ctx,
2746 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
/* ack code sits in the trailer quadlet the controller appended */
2748 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2751 hpsb_packet_received(ohci->host, d->spb,
2754 #ifdef OHCI1394_DEBUG
2756 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
/* re-sample the residual count — the controller may have written
 * more data while we were copying */
2760 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2762 bytes_left = d->buf_size - rescount - offset;
2767 d->buf_offset = offset;
2769 spin_unlock_irqrestore(&d->lock, flags);
2772 /* Bottom half that processes sent packets */
2773 static void dma_trm_tasklet (unsigned long data)
/* Completion half of the AT (async transmit) path: walks d->fifo_list
 * under d->lock, reads each packet's hardware completion status from
 * its OUTPUT_LAST descriptor, translates OHCI evt_* codes into hpsb
 * ACK codes, reports completion via hpsb_packet_sent(), unmaps the
 * payload, and finally flushes any packets still queued in software. */
2775 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2776 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2777 struct hpsb_packet *packet, *ptmp;
2778 unsigned long flags;
2782 spin_lock_irqsave(&d->lock, flags);
2784 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2785 datasize = packet->data_size;
/* packets with a payload complete on the 'end' descriptor; header-only
 * (and raw) packets complete on the 'begin' descriptor */
2786 if (datasize && packet->type != hpsb_raw)
2787 status = le32_to_cpu(
2788 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2790 status = le32_to_cpu(
2791 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2794 /* this packet hasn't been sent yet*/
2797 #ifdef OHCI1394_DEBUG
/* tcode 0xa == async stream packet; decode its header differently */
2799 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2800 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2801 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2802 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2803 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2804 status&0x1f, (status>>5)&0x3,
2805 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2808 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2809 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2810 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2811 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2812 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2813 status&0x1f, (status>>5)&0x3,
2814 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2817 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2818 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2819 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2821 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2823 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2825 status&0x1f, (status>>5)&0x3,
2826 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
/* status bit 4 set => the packet completed with an evt_* error code
 * in the low 5 bits; map it to an hpsb ACKX_* value */
2830 if (status & 0x10) {
2833 switch (status & 0x1f) {
2834 case EVT_NO_STATUS: /* that should never happen */
2835 case EVT_RESERVED_A: /* that should never happen */
2836 case EVT_LONG_PACKET: /* that should never happen */
2837 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2838 ack = ACKX_SEND_ERROR;
2840 case EVT_MISSING_ACK:
2844 ack = ACKX_SEND_ERROR;
2846 case EVT_OVERRUN: /* that should never happen */
2847 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2848 ack = ACKX_SEND_ERROR;
2850 case EVT_DESCRIPTOR_READ:
2852 case EVT_DATA_WRITE:
2853 ack = ACKX_SEND_ERROR;
2855 case EVT_BUS_RESET: /* that should never happen */
2856 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2857 ack = ACKX_SEND_ERROR;
2863 ack = ACKX_SEND_ERROR;
2865 case EVT_RESERVED_B: /* that should never happen */
2866 case EVT_RESERVED_C: /* that should never happen */
2867 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2868 ack = ACKX_SEND_ERROR;
2872 ack = ACKX_SEND_ERROR;
2875 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2876 ack = ACKX_SEND_ERROR;
/* hand the completed packet back to the ieee1394 core */
2881 list_del_init(&packet->driver_list);
2882 hpsb_packet_sent(ohci->host, packet, ack);
2885 pci_unmap_single(ohci->dev,
2886 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2887 datasize, PCI_DMA_TODEVICE);
2888 OHCI_DMA_FREE("single Xmit data packet");
/* advance the 'oldest unacknowledged' index around the ring */
2891 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2895 dma_trm_flush(ohci, d);
2897 spin_unlock_irqrestore(&d->lock, flags);
/* Quiesce a receive DMA context: stop the channel program, and for the
 * legacy ISO context additionally mask its interrupt and unregister the
 * iso tasklet, then kill the bottom half so it cannot run again. */
2900 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2903 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2905 if (d->type == DMA_CTX_ISO) {
2906 /* disable interrupts */
2907 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2908 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2910 tasklet_kill(&d->task);
/* Release everything alloc_dma_rcv_ctx() acquired: the coherent data
 * buffers, the descriptor ring entries and their pci_pool.  Safe to
 * call on a partially-initialized context (each array entry is checked
 * before freeing), which is how the alloc path uses it on error. */
2916 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2919 struct ti_ohci *ohci = d->ohci;
2924 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2927 for (i=0; i<d->num_desc; i++)
2928 if (d->buf_cpu[i] && d->buf_bus[i]) {
2929 pci_free_consistent(
2930 ohci->dev, d->buf_size,
2931 d->buf_cpu[i], d->buf_bus[i]);
2932 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2938 for (i=0; i<d->num_desc; i++)
2939 if (d->prg_cpu[i] && d->prg_bus[i]) {
2940 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2941 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2943 pci_pool_destroy(d->prg_pool);
2944 OHCI_DMA_FREE("dma_rcv prg pool");
2950 /* Mark this context as freed. */
/* Allocate and initialize a receive DMA context (async request/response
 * or legacy ISO): num_desc coherent buffers of buf_size bytes, matching
 * dma_cmd descriptors from a per-context pci_pool, and a split-packet
 * bounce buffer.  On any allocation failure free_dma_rcv_ctx() rolls
 * back whatever was already acquired. */
2955 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2956 enum context_type type, int ctx, int num_desc,
2957 int buf_size, int split_buf_size, int context_base)
/* num_allocs makes each pci_pool name unique across contexts */
2960 static int num_allocs;
2961 static char pool_name[20];
2967 d->num_desc = num_desc;
2968 d->buf_size = buf_size;
2969 d->split_buf_size = split_buf_size;
2975 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2976 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2978 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2980 free_dma_rcv_ctx(d);
2984 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2985 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2987 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2988 PRINT(KERN_ERR, "Failed to allocate dma prg");
2989 free_dma_rcv_ctx(d);
/* bounce buffer for packets that straddle descriptor boundaries */
2993 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2995 if (d->spb == NULL) {
2996 PRINT(KERN_ERR, "Failed to allocate split buffer");
2997 free_dma_rcv_ctx(d);
3001 len = sprintf(pool_name, "ohci1394_rcv_prg");
3002 sprintf(pool_name+len, "%d", num_allocs);
3003 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3004 sizeof(struct dma_cmd), 4, 0)
3005 if(d->prg_pool == NULL)
3007 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3008 free_dma_rcv_ctx(d);
3013 OHCI_DMA_ALLOC("dma_rcv prg pool");
3015 for (i=0; i<d->num_desc; i++) {
3016 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3019 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3021 if (d->buf_cpu[i] != NULL) {
3022 memset(d->buf_cpu[i], 0, d->buf_size);
3025 "Failed to allocate dma buffer");
3026 free_dma_rcv_ctx(d);
3030 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3031 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3033 if (d->prg_cpu[i] != NULL) {
3034 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3037 "Failed to allocate dma prg");
3038 free_dma_rcv_ctx(d);
3043 spin_lock_init(&d->lock);
/* ISO contexts go through the shared iso-tasklet machinery; async
 * contexts get their control registers from the fixed context base */
3045 if (type == DMA_CTX_ISO) {
3046 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3047 OHCI_ISO_MULTICHANNEL_RECEIVE,
3048 dma_rcv_tasklet, (unsigned long) d);
3050 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3051 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3052 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3054 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
/* Release a transmit DMA context: return every descriptor to its
 * pci_pool and destroy the pool.  Like free_dma_rcv_ctx(), tolerant of
 * partially-initialized contexts so it doubles as the error-unwind. */
3060 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3063 struct ti_ohci *ohci = d->ohci;
3068 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3071 for (i=0; i<d->num_desc; i++)
3072 if (d->prg_cpu[i] && d->prg_bus[i]) {
3073 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3074 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3076 pci_pool_destroy(d->prg_pool);
3077 OHCI_DMA_FREE("dma_trm prg pool");
3082 /* Mark this context as freed. */
/* Allocate and initialize a transmit DMA context: a ring of at_dma_prg
 * descriptors from a per-context pci_pool plus the completion tasklet.
 * For the legacy ISO transmit case the hardware context number is
 * assigned dynamically by ohci1394_register_iso_tasklet(). */
3087 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3088 enum context_type type, int ctx, int num_desc,
3092 static char pool_name[20];
3093 static int num_allocs=0;
3098 d->num_desc = num_desc;
3103 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3104 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3106 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3107 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3108 free_dma_trm_ctx(d);
3112 len = sprintf(pool_name, "ohci1394_trm_prg");
3113 sprintf(pool_name+len, "%d", num_allocs);
3114 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3115 sizeof(struct at_dma_prg), 4, 0);
3116 if (d->prg_pool == NULL) {
3117 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3118 free_dma_trm_ctx(d);
/* NOTE(review): message says "dma_rcv" but this is the trm pool */
3123 OHCI_DMA_ALLOC("dma_rcv prg pool");
3125 for (i = 0; i < d->num_desc; i++) {
3126 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3127 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3129 if (d->prg_cpu[i] != NULL) {
3130 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3133 "Failed to allocate at dma prg");
3134 free_dma_trm_ctx(d);
3139 spin_lock_init(&d->lock);
3141 /* initialize tasklet */
3142 if (type == DMA_CTX_ISO) {
3143 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3144 dma_trm_tasklet, (unsigned long) d);
3145 if (ohci1394_register_iso_tasklet(ohci,
3146 &ohci->it_legacy_tasklet) < 0) {
3147 PRINT(KERN_ERR, "No IT DMA context available");
3148 free_dma_trm_ctx(d);
3152 /* IT can be assigned to any context by register_iso_tasklet */
3153 d->ctx = ohci->it_legacy_tasklet.context;
3154 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3155 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3156 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3158 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3159 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3160 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3161 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
/* hpsb_host_driver hook: load a new config ROM image.  Only the header
 * and BusOptions quadlets are pushed to hardware registers here; the
 * full image is copied into the coherent csr_config_rom buffer that the
 * controller DMAs from. */
3167 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3169 struct ti_ohci *ohci = host->hostdata;
3171 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3172 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3174 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
/* hpsb_host_driver hook: perform a hardware-assisted CSR compare-swap.
 * Programs CSRData/CSRCompareData, kicks CSRControl with the register
 * selector, then polls (bounded by OHCI_LOOP_COUNT) for the done bit
 * (bit 31) before reading back the result.  If the operation never
 * completes, the stale CSRData value is returned. */
3178 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3179 quadlet_t data, quadlet_t compare)
3181 struct ti_ohci *ohci = host->hostdata;
3184 reg_write(ohci, OHCI1394_CSRData, data);
3185 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3186 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3188 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3189 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3195 return reg_read(ohci, OHCI1394_CSRData);
/* Operations exported to the ieee1394 core for this host controller. */
3198 static struct hpsb_host_driver ohci1394_driver = {
3199 .owner = THIS_MODULE,
3200 .name = OHCI1394_DRIVER_NAME,
3201 .set_hw_config_rom = ohci_set_hw_config_rom,
3202 .transmit_packet = ohci_transmit,
3203 .devctl = ohci_devctl,
3204 .isoctl = ohci_isoctl,
3205 .hw_csr_reg = ohci_hw_csr_reg,
3208 /***********************************
3209 * PCI Driver Interface functions *
3210 ***********************************/
/* Probe-path error exit: log, unwind via ohci1394_pci_remove() (which
 * tears down exactly as far as ohci->init_state records), and return
 * err from the probe function.  Relies on 'dev' being in scope. */
3212 #define FAIL(err, fmt, args...) \
3214 PRINT_G(KERN_ERR, fmt , ## args); \
3215 ohci1394_pci_remove(dev); \
/* PCI probe: bring up one OHCI-1394 controller.  Allocation order is
 * tracked in ohci->init_state so that FAIL() / ohci1394_pci_remove()
 * can unwind exactly the steps already completed.  Sequence: enable
 * PCI device, map registers, allocate config-ROM and self-ID DMA
 * buffers, create the four async DMA contexts plus the legacy IR
 * context, soft-reset and enable LPS, hook the (shared) IRQ, then
 * register the host with the ieee1394 core. */
3219 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3220 const struct pci_device_id *ent)
3222 struct hpsb_host *host;
3223 struct ti_ohci *ohci; /* shortcut to currently handled device */
3224 unsigned long ohci_base;
3226 if (pci_enable_device(dev))
3227 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3228 pci_set_master(dev);
3230 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3231 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3233 ohci = host->hostdata;
3236 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3238 pci_set_drvdata(dev, ohci);
3240 /* We don't want hardware swapping */
3241 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3243 /* Some oddball Apple controllers do not order the selfid
3244 * properly, so we make up for it here. */
3245 #ifndef __LITTLE_ENDIAN
3246 /* XXX: Need a better way to check this. I'm wondering if we can
3247 * read the values of the OHCI1394_PCI_HCI_Control and the
3248 * noByteSwapData registers to see if they were not cleared to
3249 * zero. Should this work? Obviously it's not defined what these
3250 * registers will read when they aren't supported. Bleh! */
3251 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3252 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3253 ohci->no_swap_incoming = 1;
3254 ohci->selfid_swap = 0;
3256 ohci->selfid_swap = 1;
3260 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3261 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3264 /* These chipsets require a bit of extra care when checking after
3266 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3267 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3268 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3269 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3270 ohci->check_busreset = 1;
3272 /* We hardwire the MMIO length, since some CardBus adaptors
3273 * fail to report the right length. Anyway, the ohci spec
3274 * clearly says it's 2kb, so this shouldn't be a problem. */
3275 ohci_base = pci_resource_start(dev, 0);
3276 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3277 PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
3278 pci_resource_len(dev, 0));
3280 /* Seems PCMCIA handles this internally. Not sure why. Seems
3281 * pretty bogus to force a driver to special case this. */
3283 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3284 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3285 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3287 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3289 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3290 if (ohci->registers == NULL)
3291 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3292 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3293 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3295 /* csr_config rom allocation */
3296 ohci->csr_config_rom_cpu =
3297 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3298 &ohci->csr_config_rom_bus);
3299 OHCI_DMA_ALLOC("consistent csr_config_rom");
3300 if (ohci->csr_config_rom_cpu == NULL)
3301 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3302 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3304 /* self-id dma buffer allocation */
3305 ohci->selfid_buf_cpu =
3306 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3307 &ohci->selfid_buf_bus);
3308 OHCI_DMA_ALLOC("consistent selfid_buf");
3310 if (ohci->selfid_buf_cpu == NULL)
3311 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3312 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3314 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3315 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3316 "8Kb boundary... may cause problems on some CXD3222 chip",
3317 ohci->selfid_buf_cpu);
3319 /* No self-id errors at startup */
3320 ohci->self_id_errors = 0;
3322 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3323 /* AR DMA request context allocation */
3324 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3325 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3326 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3327 OHCI1394_AsReqRcvContextBase) < 0)
3328 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3330 /* AR DMA response context allocation */
3331 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3332 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3333 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3334 OHCI1394_AsRspRcvContextBase) < 0)
3335 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3337 /* AT DMA request context */
3338 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3339 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3340 OHCI1394_AsReqTrContextBase) < 0)
3341 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3343 /* AT DMA response context */
3344 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3345 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3346 OHCI1394_AsRspTrContextBase) < 0)
3347 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3349 /* Start off with a soft reset, to clear everything to a sane
3351 ohci_soft_reset(ohci);
3353 /* Now enable LPS, which we need in order to start accessing
3354 * most of the registers. In fact, on some cards (ALI M5251),
3355 * accessing registers in the SClk domain without LPS enabled
3356 * will lock up the machine. Wait 50msec to make sure we have
3357 * full link enabled. */
3358 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3360 /* Disable and clear interrupts */
3361 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3362 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3366 /* Determine the number of available IR and IT contexts. */
3367 ohci->nb_iso_rcv_ctx =
3368 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3369 ohci->nb_iso_xmit_ctx =
3370 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3372 /* Set the usage bits for non-existent contexts so they can't
3374 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3375 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3377 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3378 spin_lock_init(&ohci->iso_tasklet_list_lock);
3379 ohci->ISO_channel_usage = 0;
3380 spin_lock_init(&ohci->IR_channel_lock);
3382 /* Allocate the IR DMA context right here so we don't have
3383 * to do it in interrupt path - note that this doesn't
3384 * waste much memory and avoids the jugglery required to
3385 * allocate it in IRQ path. */
3386 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3387 DMA_CTX_ISO, 0, IR_NUM_DESC,
3388 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3389 OHCI1394_IsoRcvContextBase) < 0) {
3390 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3393 /* We hopefully don't have to pre-allocate IT DMA like we did
3394 * for IR DMA above. Allocate it on-demand and mark inactive. */
3395 ohci->it_legacy_context.ohci = NULL;
3396 spin_lock_init(&ohci->event_lock);
3399 * interrupts are disabled, all right, but... due to SA_SHIRQ we
3400 * might get called anyway. We'll see no event, of course, but
3401 * we need to get to that "no event", so enough should be initialized
3404 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3405 OHCI1394_DRIVER_NAME, ohci))
3406 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3408 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3409 ohci_initialize(ohci);
3411 /* Set certain csr values */
3412 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3413 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3414 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3415 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3416 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
/* PhyUpperBound == 0 means the register is unimplemented; fall back
 * to the fixed physical upper bound */
3419 host->low_addr_space =
3420 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3421 if (!host->low_addr_space)
3422 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3424 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3426 /* Tell the highlevel this host is ready */
3427 if (hpsb_add_host(host))
3428 FAIL(-ENOMEM, "Failed to register host with highlevel");
3430 ohci->init_state = OHCI_INIT_DONE;
/* PCI remove / probe-failure unwind.  The switch on ohci->init_state
 * deliberately falls through: each case releases one initialization
 * step, so entering at the recorded state tears down everything that
 * was actually set up, in reverse order. */
3436 static void ohci1394_pci_remove(struct pci_dev *pdev)
3438 struct ti_ohci *ohci;
3441 ohci = pci_get_drvdata(pdev);
/* hold a device reference while tearing the host down */
3445 dev = get_device(&ohci->host->device);
3447 switch (ohci->init_state) {
3448 case OHCI_INIT_DONE:
3449 hpsb_remove_host(ohci->host);
3451 /* Clear out BUS Options */
3452 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3453 reg_write(ohci, OHCI1394_BusOptions,
3454 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3456 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
/* fall through */
3458 case OHCI_INIT_HAVE_IRQ:
3459 /* Clear interrupt registers */
3460 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3461 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3462 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3463 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3464 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3465 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3467 /* Disable IRM Contender */
3468 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3470 /* Clear link control register */
3471 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3473 /* Let all other nodes know to ignore us */
3474 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3476 /* Soft reset before we start - this disables
3477 * interrupts and clears linkEnable and LPS. */
3478 ohci_soft_reset(ohci);
3479 free_irq(ohci->dev->irq, ohci);
/* fall through */
3481 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3482 /* The ohci_soft_reset() stops all DMA contexts, so we
3483 * dont need to do this. */
3484 free_dma_rcv_ctx(&ohci->ar_req_context);
3485 free_dma_rcv_ctx(&ohci->ar_resp_context);
3486 free_dma_trm_ctx(&ohci->at_req_context);
3487 free_dma_trm_ctx(&ohci->at_resp_context);
3488 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3489 free_dma_trm_ctx(&ohci->it_legacy_context);
/* fall through */
3491 case OHCI_INIT_HAVE_SELFID_BUFFER:
3492 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3493 ohci->selfid_buf_cpu,
3494 ohci->selfid_buf_bus);
3495 OHCI_DMA_FREE("consistent selfid_buf");
/* fall through */
3497 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3498 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3499 ohci->csr_config_rom_cpu,
3500 ohci->csr_config_rom_bus);
3501 OHCI_DMA_FREE("consistent csr_config_rom");
/* fall through */
3503 case OHCI_INIT_HAVE_IOMAPPING:
3504 iounmap(ohci->registers);
/* fall through */
3506 case OHCI_INIT_HAVE_MEM_REGION:
3508 release_mem_region(pci_resource_start(ohci->dev, 0),
3509 OHCI1394_REGISTER_SIZE);
3512 #ifdef CONFIG_PPC_PMAC
3513 /* On UniNorth, power down the cable and turn off the chip
3514 * clock when the module is removed to save power on
3515 * laptops. Turning it back ON is done by the arch code when
3516 * pci_enable_device() is called */
3518 struct device_node* of_node;
3520 of_node = pci_device_to_OF_node(ohci->dev);
3522 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3523 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3526 #endif /* CONFIG_PPC_PMAC */
3528 case OHCI_INIT_ALLOC_HOST:
3529 pci_set_drvdata(ohci->dev, NULL);
/* PM resume: re-power the FireWire cell on PowerMac UniNorth bridges
 * (the mirror of the power-down in suspend/remove), then re-enable the
 * PCI device.
 * NOTE(review): the pci_enable_device() return value is ignored here —
 * a resume failure goes unreported; confirm whether that is intended. */
3537 static int ohci1394_pci_resume (struct pci_dev *pdev)
3539 #ifdef CONFIG_PPC_PMAC
3540 if (machine_is(powermac)) {
3541 struct device_node *of_node;
3543 /* Re-enable 1394 */
3544 of_node = pci_device_to_OF_node (pdev);
3546 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3548 #endif /* CONFIG_PPC_PMAC */
3550 pci_enable_device(pdev);
/* PCI power-management suspend callback.
 * On PowerMac hardware, powers down the 1394 cell via the platform
 * feature call (last argument 0 = OFF) to save power while suspended.
 * NOTE(review): extract is truncated -- closing braces and the return
 * value are not visible here; confirm against the full file. */
3556 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3558 #ifdef CONFIG_PPC_PMAC
3559 if (machine_is(powermac)) {
3560 struct device_node *of_node;
/* Find the Open Firmware node matching this PCI device. */
3563 of_node = pci_device_to_OF_node (pdev);
/* 0 = disable the 1394 cell for the duration of the suspend. */
3565 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
/* PCI class code for OHCI-compliant FireWire controllers:
 * base class SERIAL_FIREWIRE shifted into the class/subclass position,
 * with programming interface 0x10 (= OHCI). Matching by class rather
 * than by vendor/device ID covers all compliant chips. */
3573 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
/* Match every PCI device whose class is FireWire-OHCI, regardless of
 * vendor/device/subsystem IDs.
 * NOTE(review): the inner initializer braces are not visible in this
 * extract. */
3575 static struct pci_device_id ohci1394_pci_tbl[] = {
3577 .class = PCI_CLASS_FIREWIRE_OHCI,
3578 .class_mask = PCI_ANY_ID,
3579 .vendor = PCI_ANY_ID,
3580 .device = PCI_ANY_ID,
3581 .subvendor = PCI_ANY_ID,
3582 .subdevice = PCI_ANY_ID,
/* Export the table so module tools can autoload the driver on hotplug. */
3587 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
/* PCI driver operations: probe/remove plus the power-management hooks
 * defined above. Registered from ohci1394_init(). */
3589 static struct pci_driver ohci1394_pci_driver = {
3590 .name = OHCI1394_DRIVER_NAME,
3591 .id_table = ohci1394_pci_tbl,
3592 .probe = ohci1394_pci_probe,
3593 .remove = ohci1394_pci_remove,
3594 .resume = ohci1394_pci_resume,
3595 .suspend = ohci1394_pci_suspend,
3598 /***********************************
3599 * OHCI1394 Video Interface *
3600 ***********************************/
3602 /* essentially the only purpose of this code is to allow another
3603 module to hook into ohci's interrupt handler */
/* Stop a DMA context by writing to its ContextControl register.
 * @ohci: controller instance
 * @reg:  offset of the context's ContextControlClear register
 * @msg:  optional label used in diagnostics (may be NULL)
 * Writing 0x8000 clears the 'run' bit; the loop then polls the 'active'
 * bit (0x400) until the context has actually stopped.
 * NOTE(review): the loop body (timeout handling) and the return
 * statements are not visible in this extract -- confirm the error path
 * against the full file. */
3605 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3609 /* stop the channel program if it's still running */
3610 reg_write(ohci, reg, 0x8000);
3612 /* Wait until it effectively stops */
3613 while (reg_read(ohci, reg) & 0x400) {
/* Diagnostic printed when the context refuses to stop (runaway loop). */
3617 "Runaway loop while stopping context: %s...", msg ? msg : "");
3624 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
/* Initialize an iso tasklet wrapper before registration.
 * @tasklet: caller-owned wrapper structure
 * @type:    OHCI_ISO_TRANSMIT / OHCI_ISO_RECEIVE / OHCI_ISO_MULTICHANNEL_RECEIVE
 * @func:    tasklet handler to run on iso interrupts
 * @data:    opaque argument passed to @func */
3628 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3629 void (*func)(unsigned long), unsigned long data)
3631 tasklet_init(&tasklet->tasklet, func, data);
3632 tasklet->type = type;
3633 /* We init the tasklet->link field, so we can list_del() it
3634 * without worrying whether it was added to the list or not. */
3635 INIT_LIST_HEAD(&tasklet->link);
/* Claim a free iso DMA context for @tasklet and add it to the
 * controller's tasklet list.
 * Returns 0 on success, -EBUSY when no context (or the single
 * multichannel receive slot) is available.
 * NOTE(review): this extract is truncated -- the else branch header,
 * the early-return for the multichannel-busy case, the loop's success
 * break/r=0 lines and the final return are not visible; confirm against
 * the full file. */
3638 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3639 struct ohci1394_iso_tasklet *tasklet)
3641 unsigned long flags, *usage;
/* Pessimistically assume failure; cleared when a free context is found. */
3642 int n, i, r = -EBUSY;
/* Select the transmit or receive context pool and its usage bitmap. */
3644 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3645 n = ohci->nb_iso_xmit_ctx;
3646 usage = &ohci->it_ctx_usage;
3649 n = ohci->nb_iso_rcv_ctx;
3650 usage = &ohci->ir_ctx_usage;
3652 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3653 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
/* test_and_set_bit is atomic: if the bit was already set, another user
 * owns the multichannel context and registration must fail. */
3654 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
/* Lock protects the iso_tasklet_list against concurrent (un)register. */
3660 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
/* Atomically grab the first free context slot in the bitmap. */
3662 for (i = 0; i < n; i++)
3663 if (!test_and_set_bit(i, usage)) {
3664 tasklet->context = i;
3665 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3670 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Release the iso context owned by @tasklet and remove it from the
 * controller's tasklet list. Safe to call even if the tasklet was never
 * list-added, because init set up its link field for list_del(). */
3675 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3676 struct ohci1394_iso_tasklet *tasklet)
3678 unsigned long flags;
/* Make sure the handler is not running (and cannot be rescheduled)
 * before we give the context back. */
3680 tasklet_kill(&tasklet->tasklet);
3682 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
/* Return the context slot to the matching usage bitmap. */
3684 if (tasklet->type == OHCI_ISO_TRANSMIT)
3685 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3687 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
/* Multichannel receive also releases the single shared multichannel slot. */
3689 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3690 clear_bit(0, &ohci->ir_multichannel_used);
3694 list_del(&tasklet->link);
3696 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Public API for sibling modules (e.g. video1394/dv1394) that hook into
 * this driver's interrupt handling. */
3699 EXPORT_SYMBOL(ohci1394_stop_context);
3700 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3701 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3702 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3704 /***********************************
3705 * General module initialization *
3706 ***********************************/
3708 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3709 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3710 MODULE_LICENSE("GPL");
/* Module exit: unregister the PCI driver, which triggers
 * ohci1394_pci_remove() for each bound device. */
3712 static void __exit ohci1394_cleanup (void)
3714 pci_unregister_driver(&ohci1394_pci_driver);
/* Module init: register the PCI driver; probing of matching devices
 * happens via ohci1394_pci_probe(). Returns 0 or a negative errno. */
3717 static int __init ohci1394_init(void)
3719 return pci_register_driver(&ohci1394_pci_driver);
3722 module_init(ohci1394_init);
3723 module_exit(ohci1394_cleanup);