1 /******************************************************************
2 * Copyright 2005 Mentor Graphics Corporation
3 * Copyright (C) 2005-2006 by Texas Instruments
4 * Copyright (C) 2006 by Nokia Corporation
6 * This file is part of the Inventra Controller Driver for Linux.
8 * The Inventra Controller Driver for Linux is free software; you
9 * can redistribute it and/or modify it under the terms of the GNU
10 * General Public License version 2 as published by the Free Software
13 * The Inventra Controller Driver for Linux is distributed in
14 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
15 * without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 * License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with The Inventra Controller Driver for Linux; if not,
21 * write to the Free Software Foundation, Inc., 59 Temple Place,
22 * Suite 330, Boston, MA 02111-1307 USA
24 * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
25 * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
26 * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
27 * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
28 * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
29 * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
30 * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
31 * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
32 * GRAPHICS SUPPORT CUSTOMER.
33 ******************************************************************/
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/delay.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/init.h>
42 #include <linux/list.h>
45 #include "musb_host.h"
48 /* MUSB HOST status 22-mar-2006
50 * - There's still lots of partial code duplication for fault paths, so
51 * they aren't handled as consistently as they need to be.
53 * - PIO mostly behaved when last tested.
54 * + including ep0, with all usbtest cases 9, 10
55 * + usbtest 14 (ep0out) doesn't seem to run at all
56 * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
57 * configurations, but otherwise double buffering passes basic tests.
58 * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
60 * - DMA (CPPI) ... partially behaves, not currently recommended
61 * + about 1/15 the speed of typical EHCI implementations (PCI)
62 * + RX, all too often reqpkt seems to misbehave after tx
63 * + TX, no known issues (other than evident silicon issue)
65 * - DMA (Mentor/OMAP) ...has at least toggle update problems
67 * - Still no traffic scheduling code to keep NAKing bulk or control
68 * transfers from starving other requests, or to make efficient use
69 * of hardware with periodic transfers. (Note that network drivers
70 * commonly post bulk reads that stay pending for a long time; these
71 * would make very visible trouble.)
73 * - Not tested with HNP, but some SRP paths seem to behave.
77 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
78 * extra endpoint for periodic use, enabling hub + keyboard + mouse. That
79 * mostly works, except that with "usbnet" it's easy to trigger cases
80 * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
81 * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
82 * although ARP RX wins. (That test was done with a full speed link.)
87 * NOTE on endpoint usage:
89 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
90 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
92 * (Yes, bulk _could_ use more of the endpoints than that, and would even
93 * benefit from it ... one remote device may easily be NAKing while others
94 * need to perform transfers in that same direction. The same thing could
95 * be done in software though, assuming dma cooperates.)
97 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
98 * So far that scheduling is both dumb and optimistic: the endpoint will be
99 * "claimed" until its software queue is no longer refilled. No multiplexing
100 * of transfers between endpoints, or anything clever.
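/* A minimal sketch of the "claimed" test that policy implies; the
 * helper name is hypothetical, but musb->periodic[] and the qh/hep
 * fields match their use later in this file.
 */
static inline int musb_periodic_slot_busy(struct musb *musb, unsigned epnum)
{
	struct musb_qh *qh = musb->periodic[epnum];

	/* a slot stays claimed while its software queue is refilled */
	return qh && !list_empty(&qh->hep->urb_list);
}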
104 /*************************** Forwards ***************************/
106 static void musb_ep_program(struct musb *pThis, u8 bEnd,
107 struct urb *pUrb, unsigned int is_out,
108 u8 * pBuffer, u32 dwLength);
111 * Clear TX fifo. Needed to avoid BABBLE errors.
113 static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
115 void __iomem *epio = ep->regs;
119 csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
120 while (csr & MGC_M_TXCSR_FIFONOTEMPTY) {
121 DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
122 csr |= MGC_M_TXCSR_FLUSHFIFO;
123 musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
124 csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
126 ERR("Could not flush host TX fifo: csr: %04x\n", csr);
134 * Start transmit. Caller is responsible for locking shared resources.
135 * The controller lock must be held.
137 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
141 /* NOTE: no locks here; caller should lock and select EP */
143 txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
144 txcsr |= MGC_M_TXCSR_TXPKTRDY | MGC_M_TXCSR_H_WZC_BITS;
145 musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
147 txcsr = MGC_M_CSR0_H_SETUPPKT | MGC_M_CSR0_TXPKTRDY;
148 musb_writew(ep->regs, MGC_O_HDRC_CSR0, txcsr);
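/* The two writes above are alternatives; a sketch of the likely
 * if/else shape around them. The ep->bLocalEnd test is an assumption,
 * based on how ep0 is special-cased elsewhere in this file:
 *
 *	if (ep->bLocalEnd) {
 *		txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
 *		txcsr |= MGC_M_TXCSR_TXPKTRDY | MGC_M_TXCSR_H_WZC_BITS;
 *		musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
 *	} else {
 *		txcsr = MGC_M_CSR0_H_SETUPPKT | MGC_M_CSR0_TXPKTRDY;
 *		musb_writew(ep->regs, MGC_O_HDRC_CSR0, txcsr);
 *	}
 */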
153 static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
157 /* NOTE: no locks here; caller should lock and select EP */
158 txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
159 txcsr |= MGC_M_TXCSR_DMAENAB | MGC_M_TXCSR_H_WZC_BITS;
160 musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
164 * Start the URB at the front of an endpoint's queue
165 * end must be claimed from the caller.
167 * Context: controller locked, irqs blocked
170 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
175 void __iomem *pBase = musb->pRegs;
176 struct urb *urb = next_urb(qh);
177 struct musb_hw_ep *pEnd = qh->hw_ep;
178 unsigned nPipe = urb->pipe;
179 u8 bAddress = usb_pipedevice(nPipe);
180 int bEnd = pEnd->bLocalEnd;
182 /* initialize software qh state */
186 /* gather right source of data */
188 case USB_ENDPOINT_XFER_CONTROL:
189 /* control transfers always start with SETUP */
192 musb->bEnd0Stage = MGC_END0_START;
193 pBuffer = urb->setup_packet;
196 case USB_ENDPOINT_XFER_ISOC:
199 pBuffer = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
200 dwLength = urb->iso_frame_desc[0].length;
202 default: /* bulk, interrupt */
203 pBuffer = urb->transfer_buffer;
204 dwLength = urb->transfer_buffer_length;
207 DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
208 qh, urb, bAddress, qh->epnum,
209 is_in ? "in" : "out",
210 ({char *s; switch (qh->type) {
211 case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
212 case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
213 case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
214 default: s = "-intr"; break;
216 bEnd, pBuffer, dwLength);
218 /* Configure endpoint */
219 if (is_in || pEnd->bIsSharedFifo)
223 musb_ep_program(musb, bEnd, urb, !is_in, pBuffer, dwLength);
225 /* transmit may have more work: start it when it is time */
229 /* determine if the time is right for a periodic transfer */
231 case USB_ENDPOINT_XFER_ISOC:
232 case USB_ENDPOINT_XFER_INT:
233 DBG(3, "check whether there's still time for periodic Tx\n");
235 wFrame = musb_readw(pBase, MGC_O_HDRC_FRAME);
236 /* FIXME this doesn't implement that scheduling policy ...
237 * or handle framecounter wrapping
239 if ((urb->transfer_flags & URB_ISO_ASAP)
240 || (wFrame >= urb->start_frame)) {
241 /* REVISIT the SOF irq handler shouldn't duplicate
242 * this code; and we don't init urb->start_frame...
247 qh->frame = urb->start_frame;
248 /* enable SOF interrupt so we can count down */
249 DBG(1,"SOF for %d\n", bEnd);
250 #if 1 // ifndef CONFIG_ARCH_DAVINCI
251 musb_writeb(pBase, MGC_O_HDRC_INTRUSBE, 0xff);
257 DBG(4, "Start TX%d %s\n", bEnd,
258 pEnd->tx_channel ? "dma" : "pio");
260 if (!pEnd->tx_channel)
261 musb_h_tx_start(pEnd);
262 else if (is_cppi_enabled() || tusb_dma_omap())
263 cppi_host_txdma_start(pEnd);
267 /* caller owns controller lock, irqs are blocked */
269 __musb_giveback(struct musb *musb, struct urb *urb, int status)
270 __releases(musb->Lock)
271 __acquires(musb->Lock)
273 if ((urb->transfer_flags & URB_SHORT_NOT_OK)
274 && (urb->actual_length < urb->transfer_buffer_length)
276 && usb_pipein(urb->pipe))
279 spin_lock(&urb->lock);
281 if (urb->status == -EINPROGRESS)
282 urb->status = status;
283 spin_unlock(&urb->lock);
285 DBG(({ int level; switch (urb->status) {
289 /* common/boring faults */
300 "complete %p (%d), dev%d ep%d%s, %d/%d\n",
302 usb_pipedevice(urb->pipe),
303 usb_pipeendpoint(urb->pipe),
304 usb_pipein(urb->pipe) ? "in" : "out",
305 urb->actual_length, urb->transfer_buffer_length
308 spin_unlock(&musb->Lock);
309 usb_hcd_giveback_urb(musb_to_hcd(musb), urb);
310 spin_lock(&musb->Lock);
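/* Sketch of the short-read policy applied at the top of
 * __musb_giveback(); the helper is hypothetical, and the -EINPROGRESS
 * test and -EREMOTEIO result are assumptions consistent with the PIO
 * RX path later in this file.
 */
static inline void musb_mark_short_read(struct urb *urb)
{
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
			&& urb->actual_length < urb->transfer_buffer_length
			&& urb->status == -EINPROGRESS	/* assumed test */
			&& usb_pipein(urb->pipe))
		urb->status = -EREMOTEIO;
}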
313 /* for bulk/interrupt endpoints only */
314 static inline void musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
316 struct usb_device *udev = urb->dev;
318 void __iomem *epio = ep->regs;
321 /* FIXME: the current Mentor DMA code seems to have
322 * problems getting toggle correct.
325 if (is_in || ep->bIsSharedFifo)
331 csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
332 usb_settoggle(udev, qh->epnum, 1,
333 (csr & MGC_M_TXCSR_H_DATATOGGLE)
336 csr = musb_readw(epio, MGC_O_HDRC_RXCSR);
337 usb_settoggle(udev, qh->epnum, 0,
338 (csr & MGC_M_RXCSR_H_DATATOGGLE)
343 /* caller owns controller lock, irqs are blocked */
344 static struct musb_qh *
345 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
348 struct musb_hw_ep *ep = qh->hw_ep;
349 struct musb *musb = ep->musb;
350 int ready = qh->is_ready;
352 if (ep->bIsSharedFifo)
355 is_in = usb_pipein(urb->pipe);
357 /* save toggle eagerly, for paranoia */
359 case USB_ENDPOINT_XFER_BULK:
360 case USB_ENDPOINT_XFER_INT:
361 musb_save_toggle(ep, is_in, urb);
363 case USB_ENDPOINT_XFER_ISOC:
364 if (status == 0 && urb->error_count)
370 __musb_giveback(musb, urb, status);
371 qh->is_ready = ready;
373 /* reclaim resources (and bandwidth) ASAP; deschedule the qh, and
374 * invalidate it as soon as list_empty(&hep->urb_list)
376 if (list_empty(&qh->hep->urb_list)) {
377 struct list_head *head;
384 /* clobber old pointers to this qh */
385 if (is_in || ep->bIsSharedFifo)
389 qh->hep->hcpriv = NULL;
393 case USB_ENDPOINT_XFER_ISOC:
394 case USB_ENDPOINT_XFER_INT:
395 /* this is where periodic bandwidth should be
396 * de-allocated if it's tracked and allocated;
397 * and where we'd update the schedule tree...
399 musb->periodic[ep->bLocalEnd] = NULL;
404 case USB_ENDPOINT_XFER_CONTROL:
405 case USB_ENDPOINT_XFER_BULK:
406 /* fifo policy for these lists, except that NAKing
407 * should rotate a qh to the end (for fairness).
409 head = qh->ring.prev;
420 * Advance this hardware endpoint's queue, completing the specified urb and
421 * advancing to either the next urb queued to that qh, or else invalidating
422 * that qh and advancing to the next qh scheduled after the current one.
424 * Context: caller owns controller lock, irqs are blocked
427 musb_advance_schedule(struct musb *pThis, struct urb *urb,
428 struct musb_hw_ep *pEnd, int is_in)
432 if (is_in || pEnd->bIsSharedFifo)
436 qh = musb_giveback(qh, urb, 0);
438 #ifdef CONFIG_USB_INVENTRA_DMA
439 /* REVISIT udelay reportedly works around issues in unmodified
440 * Mentor RTL before v1.5, where it doesn't disable the pull-up
441 * resistors in high speed mode. That causes signal reflection
442 * and errors because inter packet IDLE time vanishes.
444 * Yes, this delay makes DMA-OUT a bit slower than PIO. But
445 * without it, some devices are unusable. But there seem to be
446 * other issues too, at least on DaVinci; the delay improves
447 * some full speed cases, and being DMA-coupled is strange...
449 if (is_dma_capable() && !is_in && pEnd->tx_channel)
450 udelay(15); /* 10 usec ~= 1x 512byte packet */
453 if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
454 DBG(4, "... next ep%d %cX urb %p\n",
455 pEnd->bLocalEnd, is_in ? 'R' : 'T',
457 musb_start_urb(pThis, is_in, qh);
461 static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
463 /* we don't want fifo to fill itself again;
464 * ignore dma (various models),
465 * leave toggle alone (may not have been saved yet)
467 csr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_RXPKTRDY;
468 csr &= ~( MGC_M_RXCSR_H_REQPKT
469 | MGC_M_RXCSR_H_AUTOREQ
470 | MGC_M_RXCSR_AUTOCLEAR
473 /* write 2x to allow double buffering */
474 musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);
475 musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);
477 /* flush writebuffer */
478 return musb_readw(hw_ep->regs, MGC_O_HDRC_RXCSR);
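/* NOTE: callers pass MGC_M_RXCSR_CLRDATATOG in "csr" when the data
 * toggle should be scrubbed as well (see musb_rx_reinit), or 0 when
 * giveback is expected to save the toggle (see musb_cleanup_urb).
 */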
482 * PIO RX for a packet (or part of it).
484 static u8 musb_host_packet_rx(struct musb *pThis, struct urb *pUrb,
485 u8 bEnd, u8 bIsochError)
493 struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
494 void __iomem *epio = pEnd->regs;
495 struct musb_qh *qh = pEnd->in_qh;
496 int nPipe = pUrb->pipe;
497 void *buffer = pUrb->transfer_buffer;
499 // MGC_SelectEnd(pBase, bEnd);
500 wRxCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
503 if (usb_pipeisoc(nPipe)) {
505 struct usb_iso_packet_descriptor *d;
512 d = pUrb->iso_frame_desc + qh->iso_idx;
513 pBuffer = buffer + d->offset;
515 if (wRxCount > length) {
520 DBG(2, "** OVERFLOW %d into %d\n", wRxCount, length);
524 pUrb->actual_length += length;
525 d->actual_length = length;
529 /* see if we are done */
530 bDone = (++qh->iso_idx >= pUrb->number_of_packets);
533 pBuffer = buffer + qh->offset;
534 length = pUrb->transfer_buffer_length - qh->offset;
535 if (wRxCount > length) {
536 if (pUrb->status == -EINPROGRESS)
537 pUrb->status = -EOVERFLOW;
538 DBG(2, "** OVERFLOW %d into %d\n", wRxCount, length);
542 pUrb->actual_length += length;
543 qh->offset += length;
545 /* see if we are done */
546 bDone = (pUrb->actual_length == pUrb->transfer_buffer_length)
547 || (wRxCount < qh->maxpacket)
548 || (pUrb->status != -EINPROGRESS);
550 && (pUrb->status == -EINPROGRESS)
551 && (pUrb->transfer_flags & URB_SHORT_NOT_OK)
552 && (pUrb->actual_length
553 < pUrb->transfer_buffer_length))
554 pUrb->status = -EREMOTEIO;
557 musb_read_fifo(pEnd, length, pBuffer);
559 wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
560 wCsr |= MGC_M_RXCSR_H_WZC_BITS;
561 if (unlikely(do_flush))
562 musb_h_flush_rxfifo(pEnd, wCsr);
564 /* REVISIT this assumes AUTOCLEAR is never set */
565 wCsr &= ~(MGC_M_RXCSR_RXPKTRDY | MGC_M_RXCSR_H_REQPKT);
567 wCsr |= MGC_M_RXCSR_H_REQPKT;
568 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
574 /* we don't always need to reinit a given side of an endpoint...
575 * when we do, use tx/rx reinit routine and then construct a new CSR
576 * to address data toggle, NYET, and DMA or PIO.
578 * it's possible that driver bugs (especially for DMA) or aborting a
579 * transfer might have left the endpoint busier than it should be.
580 * the busy/not-empty tests are basically paranoia.
583 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
587 /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
588 * That always uses tx_reinit since ep0 repurposes TX register
589 * offsets; the initial SETUP packet is also a kind of OUT.
592 /* if programmed for Tx, put it in RX mode */
593 if (ep->bIsSharedFifo) {
594 csr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
595 if (csr & MGC_M_TXCSR_MODE) {
596 musb_h_tx_flush_fifo(ep);
597 musb_writew(ep->regs, MGC_O_HDRC_TXCSR,
598 MGC_M_TXCSR_FRCDATATOG);
600 /* clear mode (and everything else) to enable Rx */
601 musb_writew(ep->regs, MGC_O_HDRC_TXCSR, 0);
603 /* scrub all previous state, clearing toggle */
605 csr = musb_readw(ep->regs, MGC_O_HDRC_RXCSR);
606 if (csr & MGC_M_RXCSR_RXPKTRDY)
607 WARN("rx%d, packet/%d ready?\n", ep->bLocalEnd,
608 musb_readw(ep->regs, MGC_O_HDRC_RXCOUNT));
610 musb_h_flush_rxfifo(ep, MGC_M_RXCSR_CLRDATATOG);
613 /* target addr and (for multipoint) hub addr/port */
614 if (musb->bIsMultipoint) {
615 musb_writeb(ep->target_regs, MGC_O_HDRC_RXFUNCADDR,
617 musb_writeb(ep->target_regs, MGC_O_HDRC_RXHUBADDR,
619 musb_writeb(ep->target_regs, MGC_O_HDRC_RXHUBPORT,
622 musb_writeb(musb->pRegs, MGC_O_HDRC_FADDR, qh->addr_reg);
624 /* protocol/endpoint, interval/NAKlimit, i/o size */
625 musb_writeb(ep->regs, MGC_O_HDRC_RXTYPE, qh->type_reg);
626 musb_writeb(ep->regs, MGC_O_HDRC_RXINTERVAL, qh->intv_reg);
627 /* NOTE: bulk combining rewrites high bits of maxpacket */
628 musb_writew(ep->regs, MGC_O_HDRC_RXMAXP, qh->maxpacket);
635 * Program an HDRC endpoint as per the given URB
636 * Context: irqs blocked, controller lock held
638 static void musb_ep_program(struct musb *pThis, u8 bEnd,
639 struct urb *pUrb, unsigned int is_out,
640 u8 * pBuffer, u32 dwLength)
642 struct dma_controller *pDmaController;
643 struct dma_channel *pDmaChannel;
645 void __iomem *pBase = pThis->pRegs;
646 struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
647 void __iomem *epio = pEnd->regs;
651 if (!is_out || pEnd->bIsSharedFifo)
656 wPacketSize = qh->maxpacket;
658 DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
659 "h_addr%02x h_port%02x bytes %d\n",
660 is_out ? "-->" : "<--",
661 bEnd, pUrb, pUrb->dev->speed,
662 qh->addr_reg, qh->epnum, is_out ? "out" : "in",
663 qh->h_addr_reg, qh->h_port_reg,
666 MGC_SelectEnd(pBase, bEnd);
668 /* candidate for DMA? */
669 pDmaController = pThis->pDmaController;
670 if (is_dma_capable() && bEnd && pDmaController) {
671 pDmaChannel = is_out ? pEnd->tx_channel : pEnd->rx_channel;
673 pDmaChannel = pDmaController->channel_alloc(
674 pDmaController, pEnd, is_out);
676 pEnd->tx_channel = pDmaChannel;
678 pEnd->rx_channel = pDmaChannel;
683 /* make sure we clear DMAEnab, autoSet bits from previous run */
685 /* OUT/transmit/EP0 or IN/receive? */
691 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
693 /* disable interrupt in case we flush */
694 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);
695 musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << bEnd));
697 /* general endpoint setup */
701 /* ASSERT: TXCSR_DMAENAB was already cleared */
703 /* flush all old state, set default */
704 musb_h_tx_flush_fifo(pEnd);
705 csr &= ~(MGC_M_TXCSR_H_NAKTIMEOUT
706 | MGC_M_TXCSR_DMAMODE
707 | MGC_M_TXCSR_FRCDATATOG
708 | MGC_M_TXCSR_H_RXSTALL
709 | MGC_M_TXCSR_H_ERROR
710 | MGC_M_TXCSR_TXPKTRDY
712 csr |= MGC_M_TXCSR_MODE;
714 if (usb_gettoggle(pUrb->dev,
716 csr |= MGC_M_TXCSR_H_WR_DATATOGGLE
717 | MGC_M_TXCSR_H_DATATOGGLE;
719 csr |= MGC_M_TXCSR_CLRDATATOG;
721 /* twice in case of double packet buffering */
722 musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
723 /* REVISIT may need to clear FLUSHFIFO ... */
724 musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
725 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
727 /* endpoint 0: just flush */
728 musb_writew(epio, MGC_O_HDRC_CSR0,
729 wCsr | MGC_M_CSR0_FLUSHFIFO);
730 musb_writew(epio, MGC_O_HDRC_CSR0,
731 wCsr | MGC_M_CSR0_FLUSHFIFO);
734 /* target addr and (for multipoint) hub addr/port */
735 if (pThis->bIsMultipoint) {
737 MGC_BUSCTL_OFFSET(bEnd, MGC_O_HDRC_TXFUNCADDR),
740 MGC_BUSCTL_OFFSET(bEnd, MGC_O_HDRC_TXHUBADDR),
743 MGC_BUSCTL_OFFSET(bEnd, MGC_O_HDRC_TXHUBPORT),
745 /* FIXME if !bEnd, do the same for RX ... */
747 musb_writeb(pBase, MGC_O_HDRC_FADDR, qh->addr_reg);
749 /* protocol/endpoint/interval/NAKlimit */
751 musb_writeb(epio, MGC_O_HDRC_TXTYPE, qh->type_reg);
752 if (can_bulk_split(pThis, qh->type))
753 musb_writew(epio, MGC_O_HDRC_TXMAXP,
755 | ((pEnd->wMaxPacketSizeTx /
756 wPacketSize) - 1) << 11);
758 musb_writew(epio, MGC_O_HDRC_TXMAXP,
760 musb_writeb(epio, MGC_O_HDRC_TXINTERVAL, qh->intv_reg);
762 musb_writeb(epio, MGC_O_HDRC_NAKLIMIT0, qh->intv_reg);
763 if (pThis->bIsMultipoint)
764 musb_writeb(epio, MGC_O_HDRC_TYPE0,
768 if (can_bulk_split(pThis, qh->type))
769 wLoadCount = min((u32) pEnd->wMaxPacketSizeTx,
772 wLoadCount = min((u32) wPacketSize, dwLength);
774 #ifdef CONFIG_USB_INVENTRA_DMA
777 /* clear previous state */
778 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
779 wCsr &= ~(MGC_M_TXCSR_AUTOSET
780 | MGC_M_TXCSR_DMAMODE
781 | MGC_M_TXCSR_DMAENAB);
782 wCsr |= MGC_M_TXCSR_MODE;
783 musb_writew(epio, MGC_O_HDRC_TXCSR,
784 wCsr | MGC_M_TXCSR_MODE);
786 qh->segsize = min(dwLength, pDmaChannel->dwMaxLength);
788 if (qh->segsize <= wPacketSize)
789 pDmaChannel->bDesiredMode = 0;
791 pDmaChannel->bDesiredMode = 1;
794 if (pDmaChannel->bDesiredMode == 0) {
795 wCsr &= ~(MGC_M_TXCSR_AUTOSET
796 | MGC_M_TXCSR_DMAMODE);
797 wCsr |= (MGC_M_TXCSR_DMAENAB);
798 // against programming guide
800 wCsr |= (MGC_M_TXCSR_AUTOSET
801 | MGC_M_TXCSR_DMAENAB
802 | MGC_M_TXCSR_DMAMODE);
804 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
806 bDmaOk = pDmaController->channel_program(
807 pDmaChannel, wPacketSize,
808 pDmaChannel->bDesiredMode,
814 pDmaController->channel_release(pDmaChannel);
816 pEnd->tx_channel = NULL;
818 pEnd->rx_channel = NULL;
824 /* candidate for DMA */
825 if ((is_cppi_enabled() || tusb_dma_omap()) && pDmaChannel) {
827 /* program endpoint CSRs first, then setup DMA.
828 * assume CPPI setup succeeds.
829 * defer enabling dma.
831 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
832 wCsr &= ~(MGC_M_TXCSR_AUTOSET
833 | MGC_M_TXCSR_DMAMODE
834 | MGC_M_TXCSR_DMAENAB);
835 wCsr |= MGC_M_TXCSR_MODE;
836 musb_writew(epio, MGC_O_HDRC_TXCSR,
837 wCsr | MGC_M_TXCSR_MODE);
839 pDmaChannel->dwActualLength = 0L;
840 qh->segsize = dwLength;
842 /* TX uses "rndis" mode automatically, but needs help
843 * to identify the zero-length-final-packet case.
845 bDmaOk = pDmaController->channel_program(
846 pDmaChannel, wPacketSize,
847 (pUrb->transfer_flags
855 pDmaController->channel_release(pDmaChannel);
856 pDmaChannel = pEnd->tx_channel = NULL;
858 /* REVISIT there's an error path here that
859 * needs handling: can't do dma, but
860 * there's no pio buffer address...
866 /* ASSERT: TXCSR_DMAENAB was already cleared */
868 /* PIO to load FIFO */
869 qh->segsize = wLoadCount;
870 musb_write_fifo(pEnd, wLoadCount, pBuffer);
871 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
872 wCsr &= ~(MGC_M_TXCSR_DMAENAB
873 | MGC_M_TXCSR_DMAMODE
874 | MGC_M_TXCSR_AUTOSET);
876 wCsr |= MGC_M_TXCSR_MODE;
879 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
882 /* re-enable interrupt */
883 musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);
889 if (pEnd->rx_reinit) {
890 musb_rx_reinit(pThis, qh, pEnd);
892 /* init new state: toggle and NYET, maybe DMA later */
893 if (usb_gettoggle(pUrb->dev, qh->epnum, 0))
894 csr = MGC_M_RXCSR_H_WR_DATATOGGLE
895 | MGC_M_RXCSR_H_DATATOGGLE;
898 if (qh->type == USB_ENDPOINT_XFER_INT)
899 csr |= MGC_M_RXCSR_DISNYET;
902 csr = musb_readw(pEnd->regs, MGC_O_HDRC_RXCSR);
904 if (csr & (MGC_M_RXCSR_RXPKTRDY
905 | MGC_M_RXCSR_DMAENAB
906 | MGC_M_RXCSR_H_REQPKT))
907 ERR("broken !rx_reinit, ep%d csr %04x\n",
908 pEnd->bLocalEnd, csr);
910 /* scrub any stale state, leaving toggle alone */
911 csr &= MGC_M_RXCSR_DISNYET;
914 /* kick things off */
916 if ((is_cppi_enabled() || tusb_dma_omap()) && pDmaChannel) {
917 /* candidate for DMA */
919 pDmaChannel->dwActualLength = 0L;
920 qh->segsize = dwLength;
922 /* AUTOREQ is in a DMA register */
923 musb_writew(pEnd->regs, MGC_O_HDRC_RXCSR, csr);
924 csr = musb_readw(pEnd->regs,
927 /* unless caller treats short rx transfers as
928 * errors, we dare not queue multiple transfers.
930 bDmaOk = pDmaController->channel_program(
931 pDmaChannel, wPacketSize,
932 !(pUrb->transfer_flags
937 pDmaController->channel_release(
939 pDmaChannel = pEnd->rx_channel = NULL;
941 csr |= MGC_M_RXCSR_DMAENAB;
945 csr |= MGC_M_RXCSR_H_REQPKT;
946 DBG(7, "RXCSR%d := %04x\n", bEnd, csr);
947 musb_writew(pEnd->regs, MGC_O_HDRC_RXCSR, csr);
948 csr = musb_readw(pEnd->regs, MGC_O_HDRC_RXCSR);
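/* the read back above flushes the cpu writebuffer, as in
 * musb_h_flush_rxfifo()
 */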
954 * Service the default endpoint (ep0) as host.
955 * Return TRUE until it's time to start the status stage.
957 static int musb_h_ep0_continue(struct musb *pThis,
958 u16 wCount, struct urb *pUrb)
961 u8 *pFifoDest = NULL;
963 struct musb_hw_ep *pEnd = pThis->control_ep;
964 struct musb_qh *qh = pEnd->in_qh;
965 struct usb_ctrlrequest *pRequest;
967 switch (pThis->bEnd0Stage) {
969 pFifoDest = pUrb->transfer_buffer + pUrb->actual_length;
970 wFifoCount = min(wCount, ((u16) (pUrb->transfer_buffer_length
971 - pUrb->actual_length)));
972 if (wFifoCount < wCount)
973 pUrb->status = -EOVERFLOW;
975 musb_read_fifo(pEnd, wFifoCount, pFifoDest);
977 pUrb->actual_length += wFifoCount;
978 if (wCount < qh->maxpacket) {
979 /* always terminate on short read; it's
980 * rarely reported as an error.
982 } else if (pUrb->actual_length <
983 pUrb->transfer_buffer_length)
987 pRequest = (struct usb_ctrlrequest *) pUrb->setup_packet;
989 if (!pRequest->wLength) {
990 DBG(4, "start no-DATA\n");
992 } else if (pRequest->bRequestType & USB_DIR_IN) {
993 DBG(4, "start IN-DATA\n");
994 pThis->bEnd0Stage = MGC_END0_IN;
998 DBG(4, "start OUT-DATA\n");
999 pThis->bEnd0Stage = MGC_END0_OUT;
1004 wFifoCount = min(qh->maxpacket, ((u16)
1005 (pUrb->transfer_buffer_length
1006 - pUrb->actual_length)));
1009 pFifoDest = (u8 *) (pUrb->transfer_buffer
1010 + pUrb->actual_length);
1011 DBG(3, "Sending %d bytes to %p\n",
1012 wFifoCount, pFifoDest);
1013 musb_write_fifo(pEnd, wFifoCount, pFifoDest);
1015 pUrb->actual_length += wFifoCount;
1020 ERR("bogus ep0 stage %d\n", pThis->bEnd0Stage);
1028 * Handle default endpoint interrupt as host. Only called at IRQ time
1029 * from the LinuxIsr() interrupt service routine.
1031 * called with controller irqlocked
1033 irqreturn_t musb_h_ep0_irq(struct musb *pThis)
1036 u16 wCsrVal, wCount;
1038 void __iomem *pBase = pThis->pRegs;
1039 struct musb_hw_ep *pEnd = pThis->control_ep;
1040 void __iomem *epio = pEnd->regs;
1041 struct musb_qh *qh = pEnd->in_qh;
1042 u8 bComplete = FALSE;
1043 irqreturn_t retval = IRQ_NONE;
1045 /* ep0 only has one queue, "in" */
1046 pUrb = next_urb(qh);
1048 MGC_SelectEnd(pBase, 0);
1049 wCsrVal = musb_readw(epio, MGC_O_HDRC_CSR0);
1050 wCount = (wCsrVal & MGC_M_CSR0_RXPKTRDY)
1051 ? musb_readb(epio, MGC_O_HDRC_COUNT0)
1054 DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1055 wCsrVal, qh, wCount, pUrb, pThis->bEnd0Stage);
1057 /* if we just did status stage, we are done */
1058 if (MGC_END0_STATUS == pThis->bEnd0Stage) {
1059 retval = IRQ_HANDLED;
1063 /* prepare status */
1064 if (wCsrVal & MGC_M_CSR0_H_RXSTALL) {
1065 DBG(6, "STALLING ENDPOINT\n");
1068 } else if (wCsrVal & MGC_M_CSR0_H_ERROR) {
1069 DBG(2, "no response, csr0 %04x\n", wCsrVal);
1072 } else if (wCsrVal & MGC_M_CSR0_H_NAKTIMEOUT) {
1073 DBG(2, "control NAK timeout\n");
1075 /* NOTE: this code path would be a good place to PAUSE a
1076 * control transfer, if another one is queued, so that
1077 * ep0 is more likely to stay busy.
1079 * if (qh->ring.next != &musb->control), then
1080 * we have a candidate... NAKing is *NOT* an error
1082 musb_writew(epio, MGC_O_HDRC_CSR0, 0);
1083 retval = IRQ_HANDLED;
1087 DBG(6, "aborting\n");
1088 retval = IRQ_HANDLED;
1090 pUrb->status = status;
1093 /* use the proper sequence to abort the transfer */
1094 if (wCsrVal & MGC_M_CSR0_H_REQPKT) {
1095 wCsrVal &= ~MGC_M_CSR0_H_REQPKT;
1096 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1097 wCsrVal &= ~MGC_M_CSR0_H_NAKTIMEOUT;
1098 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1100 wCsrVal |= MGC_M_CSR0_FLUSHFIFO;
1101 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1102 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1103 wCsrVal &= ~MGC_M_CSR0_H_NAKTIMEOUT;
1104 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1107 musb_writeb(epio, MGC_O_HDRC_NAKLIMIT0, 0);
1110 musb_writew(epio, MGC_O_HDRC_CSR0, 0);
1113 if (unlikely(!pUrb)) {
1114 /* stop endpoint since we have no place for its data; this
1115 * SHOULD NEVER HAPPEN! */
1116 ERR("no URB for end 0\n");
1118 musb_writew(epio, MGC_O_HDRC_CSR0, MGC_M_CSR0_FLUSHFIFO);
1119 musb_writew(epio, MGC_O_HDRC_CSR0, MGC_M_CSR0_FLUSHFIFO);
1120 musb_writew(epio, MGC_O_HDRC_CSR0, 0);
1126 /* call common logic and prepare response */
1127 if (musb_h_ep0_continue(pThis, wCount, pUrb)) {
1128 /* more packets required */
1129 wCsrVal = (MGC_END0_IN == pThis->bEnd0Stage)
1130 ? MGC_M_CSR0_H_REQPKT : MGC_M_CSR0_TXPKTRDY;
1132 /* data transfer complete; perform status phase */
1133 if (usb_pipeout(pUrb->pipe)
1134 || !pUrb->transfer_buffer_length)
1135 wCsrVal = MGC_M_CSR0_H_STATUSPKT
1136 | MGC_M_CSR0_H_REQPKT;
1138 wCsrVal = MGC_M_CSR0_H_STATUSPKT
1139 | MGC_M_CSR0_TXPKTRDY;
1141 /* flag status stage */
1142 pThis->bEnd0Stage = MGC_END0_STATUS;
1144 DBG(5, "ep0 STATUS, csr %04x\n", wCsrVal);
1147 musb_writew(epio, MGC_O_HDRC_CSR0, wCsrVal);
1148 retval = IRQ_HANDLED;
1150 pThis->bEnd0Stage = MGC_END0_IDLE;
1152 /* call completion handler if done */
1154 musb_advance_schedule(pThis, pUrb, pEnd, 1);
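/* For orientation, the stage progression driven above (a summary of
 * this file's MGC_END0_* handling, not new behavior):
 *
 *	MGC_END0_START --SETUP sent--> MGC_END0_IN or MGC_END0_OUT for
 *	the data stage (or straight to status for no-data requests);
 *	after the last data packet, MGC_END0_STATUS; once the status
 *	handshake irq arrives, back to MGC_END0_IDLE and the urb is
 *	given back.
 */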
1160 #ifdef CONFIG_USB_INVENTRA_DMA
1162 /* Host side TX (OUT) using Mentor DMA works as follows:
1164 - if queue was empty, Program Endpoint
1165 - ... which starts DMA to fifo in mode 1 or 0
1167 DMA Isr (transfer complete) -> TxAvail()
1168 - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
1169 only in musb_cleanup_urb)
1170 - TxPktRdy has to be set in mode 0 or for
1171 short packets in mode 1.
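/* That TxPktRdy rule as a predicate, for clarity (sketch; the helper
 * name is hypothetical, the test mirrors the one in musb_host_tx()
 * below):
 */
static inline int tx_dma_needs_txpktrdy(struct dma_channel *dma, u16 maxpacket)
{
	/* mode 0 always hands TxPktRdy to software; mode 1 only for a
	 * trailing short packet
	 */
	return (dma->bDesiredMode == 0)
		|| (dma->dwActualLength & (maxpacket - 1));
}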
1176 /* Service a Tx-Available or dma completion irq for the endpoint */
1177 void musb_host_tx(struct musb *pThis, u8 bEnd)
1185 struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
1186 void __iomem *epio = pEnd->regs;
1187 struct musb_qh *qh = pEnd->out_qh;
1189 void __iomem *pBase = pThis->pRegs;
1190 struct dma_channel *dma;
1192 pUrb = next_urb(qh);
1194 MGC_SelectEnd(pBase, bEnd);
1195 wTxCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
1197 /* with CPPI, DMA sometimes triggers "extra" irqs */
1199 DBG(4, "extra TX%d ready, csr %04x\n", bEnd, wTxCsrVal);
1204 dma = is_dma_capable() ? pEnd->tx_channel : NULL;
1205 DBG(4, "OUT/TX%d end, csr %04x%s\n", bEnd, wTxCsrVal,
1206 dma ? ", dma" : "");
1208 /* check for errors */
1209 if (wTxCsrVal & MGC_M_TXCSR_H_RXSTALL) {
1210 /* dma was disabled, fifo flushed */
1211 DBG(3, "TX end %d stall\n", bEnd);
1213 /* stall; record URB status */
1216 } else if (wTxCsrVal & MGC_M_TXCSR_H_ERROR) {
1217 /* (NON-ISO) dma was disabled, fifo flushed */
1218 DBG(3, "TX 3strikes on ep=%d\n", bEnd);
1220 status = -ETIMEDOUT;
1222 } else if (wTxCsrVal & MGC_M_TXCSR_H_NAKTIMEOUT) {
1223 DBG(6, "TX end=%d device not responding\n", bEnd);
1225 /* NOTE: this code path would be a good place to PAUSE a
1226 * transfer, if there's some other (nonperiodic) tx urb
1227 * that could use this fifo. (dma complicates it...)
1229 * if (bulk && qh->ring.next != &musb->out_bulk), then
1230 * we have a candidate... NAKing is *NOT* an error
1232 MGC_SelectEnd(pBase, bEnd);
1233 musb_writew(epio, MGC_O_HDRC_TXCSR,
1234 MGC_M_TXCSR_H_WZC_BITS
1235 | MGC_M_TXCSR_TXPKTRDY);
1240 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
1241 dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
1242 (void) pThis->pDmaController->channel_abort(dma);
1245 /* do the proper sequence to abort the transfer in the
1246 * usb core; the dma engine should already be stopped.
1248 musb_h_tx_flush_fifo(pEnd);
1249 wTxCsrVal &= ~(MGC_M_TXCSR_AUTOSET
1250 | MGC_M_TXCSR_DMAENAB
1251 | MGC_M_TXCSR_H_ERROR
1252 | MGC_M_TXCSR_H_RXSTALL
1253 | MGC_M_TXCSR_H_NAKTIMEOUT
1256 MGC_SelectEnd(pBase, bEnd);
1257 musb_writew(epio, MGC_O_HDRC_TXCSR, wTxCsrVal);
1258 /* REVISIT may need to clear FLUSHFIFO ... */
1259 musb_writew(epio, MGC_O_HDRC_TXCSR, wTxCsrVal);
1260 musb_writeb(epio, MGC_O_HDRC_TXINTERVAL, 0);
1265 /* second cppi case */
1266 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
1267 DBG(4, "extra TX%d ready, csr %04x\n", bEnd, wTxCsrVal);
1272 /* REVISIT this looks wrong... */
1273 if (!status || dma || usb_pipeisoc(nPipe)) {
1275 #ifdef CONFIG_USB_INVENTRA_DMA
1276 /* mode 0, or last short packet in mode 1;
1277 * REVISIT how about ZLP?
1279 if ((dma->bDesiredMode == 0)
1280 || (dma->dwActualLength
1281 & (qh->maxpacket - 1))) {
1282 /* Send out the packet first ... */
1283 MGC_SelectEnd(pBase, bEnd);
1284 musb_writew(epio, MGC_O_HDRC_TXCSR,
1285 MGC_M_TXCSR_TXPKTRDY);
1289 wLength = dma->dwActualLength;
1291 wLength = qh->segsize;
1292 qh->offset += wLength;
1294 if (usb_pipeisoc(nPipe)) {
1295 struct usb_iso_packet_descriptor *d;
1297 d = pUrb->iso_frame_desc + qh->iso_idx;
1298 d->actual_length = qh->segsize;
1299 if (++qh->iso_idx >= pUrb->number_of_packets) {
1303 pBuffer = pUrb->transfer_buffer + d->offset;
1304 wLength = d->length;
1309 /* see if we need to send more data, or ZLP */
1310 if (qh->segsize < qh->maxpacket)
1312 else if (qh->offset == pUrb->transfer_buffer_length
1313 && !(pUrb->transfer_flags
1317 pBuffer = pUrb->transfer_buffer
1319 wLength = pUrb->transfer_buffer_length
1325 /* urb->status != -EINPROGRESS means request has been faulted,
1326 * so we must abort this transfer after cleanup
1328 if (pUrb->status != -EINPROGRESS) {
1331 status = pUrb->status;
1336 pUrb->status = status;
1337 pUrb->actual_length = qh->offset;
1338 musb_advance_schedule(pThis, pUrb, pEnd, USB_DIR_OUT);
1340 } else if (!(wTxCsrVal & MGC_M_TXCSR_DMAENAB)) {
1341 // WARN_ON(!pBuffer);
1343 /* REVISIT: some docs say that when pEnd->tx_double_buffered,
1344 * (and presumably, fifo is not half-full) we should write TWO
1345 * packets before updating TXCSR ... other docs disagree ...
1347 /* PIO: start next packet in this URB */
1348 wLength = min(qh->maxpacket, (u16) wLength);
1349 musb_write_fifo(pEnd, wLength, pBuffer);
1350 qh->segsize = wLength;
1352 MGC_SelectEnd(pBase, bEnd);
1353 musb_writew(epio, MGC_O_HDRC_TXCSR,
1354 MGC_M_TXCSR_H_WZC_BITS | MGC_M_TXCSR_TXPKTRDY);
1356 DBG(1, "not complete, but dma enabled?\n");
1363 #ifdef CONFIG_USB_INVENTRA_DMA
1365 /* Host side RX (IN) using Mentor DMA works as follows:
1367 - if queue was empty, ProgramEndpoint
1368 - first IN token is sent out (by setting ReqPkt)
1369 LinuxIsr -> RxReady()
1370 /\ => first packet is received
1371 | - Set in mode 0 (DmaEnab, ~ReqPkt)
1372 | -> DMA Isr (transfer complete) -> RxReady()
1373 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1374 | - if urb not complete, send next IN token (ReqPkt)
1375 | | else complete urb.
1377 ---------------------------
1379 * Nuances of mode 1:
1380 * For short packets, no ack (+RxPktRdy) is sent automatically
1381 * (even if AutoClear is ON)
1382 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) are sent
1383 * automatically => major problem, as collecting the next packet becomes
1384 * difficult. Hence mode 1 is not used.
1387 * All we care about at this driver level is that
1388 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1389 * (b) termination conditions are: short RX, or buffer full;
1390 * (c) fault modes include
1391 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1392 * (and that endpoint's dma queue stops immediately)
1393 * - overflow (full, PLUS more bytes in the terminal packet)
1395 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1396 * thus be a great candidate for using mode 1 ... for all but the
1397 * last packet of one URB's transfer.
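/* The mode choice those notes lead to, as a sketch (the helper name is
 * hypothetical; the test mirrors the one in musb_host_rx() below,
 * including its "more than one packet still expected" bound):
 */
static inline u8 rx_dma_desired_mode(struct urb *urb, u16 maxpacket)
{
	/* mode 1 only helps when a short rx would be an error and more
	 * than a packet's worth of data is still expected
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (urb->transfer_buffer_length - urb->actual_length)
				> maxpacket)
		return 1;
	return 0;
}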
1403 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1404 * and high-bandwidth IN transfer cases.
1406 void musb_host_rx(struct musb *pThis, u8 bEnd)
1409 struct musb_hw_ep *pEnd = pThis->aLocalEnd + bEnd;
1410 void __iomem *epio = pEnd->regs;
1411 struct musb_qh *qh = pEnd->in_qh;
1413 void __iomem *pBase = pThis->pRegs;
1415 u16 wRxCsrVal, wVal;
1416 u8 bIsochError = FALSE;
1419 struct dma_channel *dma;
1421 MGC_SelectEnd(pBase, bEnd);
1423 pUrb = next_urb(qh);
1424 dma = is_dma_capable() ? pEnd->rx_channel : NULL;
1428 wVal = wRxCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
1430 if (unlikely(!pUrb)) {
1431 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1432 * usbtest #11 (unlinks) triggers it regularly, sometimes
1433 * with fifo full. (Only with DMA??)
1435 DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", bEnd, wVal,
1436 musb_readw(epio, MGC_O_HDRC_RXCOUNT));
1437 musb_h_flush_rxfifo(pEnd, MGC_M_RXCSR_CLRDATATOG);
1443 DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zd)\n",
1444 bEnd, wRxCsrVal, pUrb->actual_length,
1445 dma ? dma->dwActualLength : 0);
1447 /* check for errors, concurrent stall & unlink is not really
1449 if (wRxCsrVal & MGC_M_RXCSR_H_RXSTALL) {
1450 DBG(3, "RX end %d STALL\n", bEnd);
1452 /* stall; record URB status */
1455 } else if (wRxCsrVal & MGC_M_RXCSR_H_ERROR) {
1456 DBG(3, "end %d RX proto error\n", bEnd);
1459 musb_writeb(epio, MGC_O_HDRC_RXINTERVAL, 0);
1461 } else if (wRxCsrVal & MGC_M_RXCSR_DATAERROR) {
1463 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1464 /* NOTE this code path would be a good place to PAUSE a
1465 * transfer, if there's some other (nonperiodic) rx urb
1466 * that could use this fifo. (dma complicates it...)
1468 * if (bulk && qh->ring.next != &musb->in_bulk), then
1469 * we have a candidate... NAKing is *NOT* an error
1471 DBG(6, "RX end %d NAK timeout\n", bEnd);
1472 MGC_SelectEnd(pBase, bEnd);
1473 musb_writew(epio, MGC_O_HDRC_RXCSR,
1474 MGC_M_RXCSR_H_WZC_BITS
1475 | MGC_M_RXCSR_H_REQPKT);
1479 DBG(4, "RX end %d ISO data error\n", bEnd);
1480 /* packet error reported later */
1485 /* faults abort the transfer */
1487 /* clean up dma and collect transfer count */
1488 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
1489 dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
1490 (void) pThis->pDmaController->channel_abort(dma);
1491 xfer_len = dma->dwActualLength;
1493 musb_h_flush_rxfifo(pEnd, 0);
1494 musb_writeb(epio, MGC_O_HDRC_RXINTERVAL, 0);
1499 if (unlikely(dma_channel_status(dma) == MGC_DMA_STATUS_BUSY)) {
1500 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1501 ERR("RX%d dma busy, csr %04x\n", bEnd, wRxCsrVal);
1505 /* thorough shutdown for now ... given more precise fault handling
1506 * and better queueing support, we might keep a DMA pipeline going
1507 * while processing this irq for earlier completions.
1510 /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1512 #ifndef CONFIG_USB_INVENTRA_DMA
1513 if (wRxCsrVal & MGC_M_RXCSR_H_REQPKT) {
1514 /* REVISIT this happened for a while on some short reads...
1515 * the cleanup still needs investigation... looks bad...
1516 * and also duplicates dma cleanup code above ... plus,
1517 * shouldn't this be the "half full" double buffer case?
1519 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
1520 dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
1521 (void) pThis->pDmaController->channel_abort(dma);
1522 xfer_len = dma->dwActualLength;
1526 DBG(2, "RXCSR%d %04x, reqpkt, len %zd%s\n", bEnd, wRxCsrVal,
1527 xfer_len, dma ? ", dma" : "");
1528 wRxCsrVal &= ~MGC_M_RXCSR_H_REQPKT;
1530 MGC_SelectEnd(pBase, bEnd);
1531 musb_writew(epio, MGC_O_HDRC_RXCSR,
1532 MGC_M_RXCSR_H_WZC_BITS | wRxCsrVal);
1535 if (dma && (wRxCsrVal & MGC_M_RXCSR_DMAENAB)) {
1536 xfer_len = dma->dwActualLength;
1538 wVal &= ~(MGC_M_RXCSR_DMAENAB
1539 | MGC_M_RXCSR_H_AUTOREQ
1540 | MGC_M_RXCSR_AUTOCLEAR
1541 | MGC_M_RXCSR_RXPKTRDY);
1542 musb_writew(pEnd->regs, MGC_O_HDRC_RXCSR, wVal);
1544 #ifdef CONFIG_USB_INVENTRA_DMA
1545 pUrb->actual_length += xfer_len;
1546 qh->offset += xfer_len;
1548 /* bDone if pUrb buffer is full or a short packet is received */
1549 bDone = (pUrb->actual_length >= pUrb->transfer_buffer_length)
1550 || (dma->dwActualLength & (qh->maxpacket - 1));
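/* the second test above: a dma length that is not a multiple of
 * maxpacket means the final packet was short
 */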
1552 /* send IN token for next packet, without AUTOREQ */
1554 wVal |= MGC_M_RXCSR_H_REQPKT;
1555 musb_writew(epio, MGC_O_HDRC_RXCSR,
1556 MGC_M_RXCSR_H_WZC_BITS | wVal);
1559 DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", bEnd,
1560 bDone ? "off" : "reset",
1561 musb_readw(epio, MGC_O_HDRC_RXCSR),
1562 musb_readw(epio, MGC_O_HDRC_RXCOUNT));
1566 } else if (pUrb->status == -EINPROGRESS) {
1567 /* if no errors, be sure a packet is ready for unloading */
1568 if (unlikely(!(wRxCsrVal & MGC_M_RXCSR_RXPKTRDY))) {
1570 ERR("Rx interrupt with no errors or packet!\n");
1572 // FIXME this is another "SHOULD NEVER HAPPEN"
1575 /* do the proper sequence to abort the transfer */
1576 MGC_SelectEnd(pBase, bEnd);
1577 wVal &= ~MGC_M_RXCSR_H_REQPKT;
1578 musb_writew(epio, MGC_O_HDRC_RXCSR, wVal);
1582 /* we are expecting IN packets */
1583 #ifdef CONFIG_USB_INVENTRA_DMA
1585 struct dma_controller *c;
1589 wRxCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
1591 DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
1594 + pUrb->actual_length,
1596 pUrb->transfer_buffer_length);
1598 c = pThis->pDmaController;
1600 dma->bDesiredMode = 0;
1602 /* because of the issue below, mode 1 will
1603 * only rarely behave with correct semantics.
1605 if ((pUrb->transfer_flags &
1607 && (pUrb->transfer_buffer_length -
1608 pUrb->actual_length)
1610 dma->bDesiredMode = 1;
1613 /* Disadvantage of using mode 1:
1614 * It's basically usable only for mass storage class; essentially all
1615 * other protocols also terminate transfers on short packets.
1618 * An extra IN token is sent at the end of the transfer (due to AUTOREQ).
1619 * If you try to use mode 1 for (transfer_buffer_length - 512), and try
1620 * to use the extra IN token to grab the last packet using mode 0, then
1621 * the problem is that you cannot be sure when the device will send the
1622 * last packet and set RxPktRdy. Sometimes the packet arrives too soon,
1623 * such that it gets lost when RxCSR is re-set at the end of the mode 1
1624 * transfer, while sometimes it arrives just a little late, so that if
1625 * you try to configure for mode 0 soon after the mode 1 transfer is
1626 * completed, you will find rxcount 0. Okay, so you might think: why not
1627 * wait for an interrupt when the packet is received? Well, you won't get any!
1630 wVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
1631 wVal &= ~MGC_M_RXCSR_H_REQPKT;
1633 if (dma->bDesiredMode == 0)
1634 wVal &= ~MGC_M_RXCSR_H_AUTOREQ;
1636 wVal |= MGC_M_RXCSR_H_AUTOREQ;
1637 wVal |= MGC_M_RXCSR_AUTOCLEAR | MGC_M_RXCSR_DMAENAB;
1639 musb_writew(epio, MGC_O_HDRC_RXCSR,
1640 MGC_M_RXCSR_H_WZC_BITS | wVal);
1642 /* REVISIT if when actual_length != 0,
1643 * transfer_buffer_length needs to be
1646 status = c->channel_program(
1650 + pUrb->actual_length,
1651 (dma->bDesiredMode == 0)
1653 : pUrb->transfer_buffer_length);
1656 c->channel_release(dma);
1657 dma = pEnd->rx_channel = NULL;
1658 /* REVISIT reset CSR */
1661 #endif /* Mentor DMA */
1664 bDone = musb_host_packet_rx(pThis, pUrb,
1666 DBG(6, "read %spacket\n", bDone ? "last " : "");
1671 pUrb->actual_length += xfer_len;
1672 qh->offset += xfer_len;
1674 if (pUrb->status == -EINPROGRESS)
1675 pUrb->status = status;
1676 musb_advance_schedule(pThis, pUrb, pEnd, USB_DIR_IN);
1680 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1681 * the software schedule associates multiple such nodes with a given
1682 * host side hardware endpoint + direction; scheduling may activate
1683 * that hardware endpoint.
1685 static int musb_schedule(
1693 struct musb_hw_ep *hw_ep = NULL;
1694 struct list_head *head = NULL;
1696 /* use fixed hardware for control and bulk */
1698 case USB_ENDPOINT_XFER_CONTROL:
1699 head = &musb->control;
1700 hw_ep = musb->control_ep;
1702 case USB_ENDPOINT_XFER_BULK:
1703 hw_ep = musb->bulk_ep;
1705 head = &musb->in_bulk;
1707 head = &musb->out_bulk;
1711 idle = list_empty(head);
1712 list_add_tail(&qh->ring, head);
1716 /* else, periodic transfers get muxed to other endpoints */
1718 /* FIXME this doesn't consider direction, so it can only
1719 * work for one half of the endpoint hardware, and assumes
1720 * the previous cases handled all non-shared endpoints...
1723 /* we know this qh hasn't been scheduled, so all we need to do
1724 * is choose which hardware endpoint to put it on ...
1726 * REVISIT what we really want here is a regular schedule tree
1727 * like e.g. OHCI uses, but for now musb->periodic is just an
1728 * array of the _single_ logical endpoint associated with a
1729 * given physical one (identity mapping logical->physical).
1731 * that simplistic approach makes TT scheduling a lot simpler;
1732 * there is none, and thus none of its complexity...
1737 for (nEnd = 1; nEnd < musb->bEndCount; nEnd++) {
1740 if (musb->periodic[nEnd])
1742 hw_ep = &musb->aLocalEnd[nEnd];
1743 if (hw_ep == musb->bulk_ep)
1747 diff = hw_ep->wMaxPacketSizeRx - qh->maxpacket;
1749 diff = hw_ep->wMaxPacketSizeTx - qh->maxpacket;
1751 if (diff > 0 && wBestDiff > diff) {
1760 hw_ep = musb->aLocalEnd + nBestEnd;
1761 musb->periodic[nBestEnd] = qh;
1762 DBG(4, "qh %p periodic slot %d\n", qh, nBestEnd);
1765 qh->hep->hcpriv = qh;
1767 musb_start_urb(musb, is_in, qh);
1771 static int musb_urb_enqueue(
1772 struct usb_hcd *hcd,
1773 struct usb_host_endpoint *hep,
1777 unsigned long flags;
1778 struct musb *musb = hcd_to_musb(hcd);
1779 struct musb_qh *qh = hep->hcpriv;
1780 struct usb_endpoint_descriptor *epd = &hep->desc;
1785 /* host role must be active */
1786 if (!is_host_active(musb) || !musb->is_active)
1789 /* DMA mapping was already done, if needed, and this urb is on
1790 * hep->urb_list ... so there's little to do unless hep wasn't
1791 * yet scheduled onto a live qh.
1793 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1794 * disabled, testing for empty qh->ring and avoiding qh setup costs
1795 * except for the first urb queued after a config change.
1802 /* Allocate and initialize qh, minimizing the work done each time
1803 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
1805 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1806 * for bugs in other kernel code to break this driver...
1808 qh = kzalloc(sizeof *qh, mem_flags);
1814 INIT_LIST_HEAD(&qh->ring);
1817 qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1819 /* no high bandwidth support yet */
1820 if (qh->maxpacket & ~0x7ff) {
1825 qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1826 qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1828 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1829 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1831 /* precompute rxtype/txtype/type0 register */
1832 type_reg = (qh->type << 4) | qh->epnum;
1833 switch (urb->dev->speed) {
1837 case USB_SPEED_FULL:
1843 qh->type_reg = type_reg;
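/* the speed cases above OR speed bits into type_reg; on this hardware
 * they occupy the top bits of TXTYPE/RXTYPE (an assumption based on
 * the register layout, not recovered from this file)
 */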
1845 /* precompute rxinterval/txinterval register */
1846 interval = min((u8)16, epd->bInterval); /* log encoding */
1848 case USB_ENDPOINT_XFER_INT:
1849 /* fullspeed uses linear encoding */
1850 if (USB_SPEED_FULL == urb->dev->speed) {
1851 interval = epd->bInterval;
1856 case USB_ENDPOINT_XFER_ISOC:
1857 /* iso always uses log encoding */
1860 /* REVISIT we actually want to use NAK limits, hinting to the
1861 * transfer scheduling logic to try some other qh, e.g. try
1864 * interval = (USB_SPEED_HIGH == pUrb->dev->speed) ? 16 : 2;
1866 * The downside of disabling this is that transfer scheduling
1867 * gets VERY unfair for nonperiodic transfers; a misbehaving
1868 * peripheral could make that hurt. Or for reads, one that's
1869 * perfectly normal: network and other drivers keep reads
1870 * posted at all times, having one pending for a week should
1871 * be perfectly safe.
1873 * The upside of disabling it is avoiding transfer scheduling
1874 * code, putting that work aside for a while.
1878 qh->intv_reg = interval;
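/* for reference: with log encoding, a register value n makes the
 * controller wait 2^(n-1) (micro)frames between attempts, while the
 * full speed interrupt case above uses bInterval frames directly
 */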
1880 /* precompute addressing for external hub/tt ports */
1881 if (musb->bIsMultipoint) {
1882 struct usb_device *parent = urb->dev->parent;
1884 if (parent != hcd->self.root_hub) {
1885 qh->h_addr_reg = (u8) parent->devnum;
1887 /* set up tt info if needed */
1889 qh->h_port_reg = (u8) urb->dev->ttport;
1890 qh->h_addr_reg |= 0x80;
1895 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
1896 * until we get real dma queues (with an entry for each urb/buffer),
1897 * we only have work to do in the former case.
1899 spin_lock_irqsave(&musb->Lock, flags);
1901 /* some concurrent activity submitted another urb to hep...
1902 * odd, rare, error prone, but legal.
1907 status = musb_schedule(musb, qh,
1908 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1912 /* FIXME set urb->start_frame for iso/intr, it's tested in
1913 * musb_start_urb(), but otherwise only konicawc cares ...
1916 spin_unlock_irqrestore(&musb->Lock, flags);
1926 * abort a transfer that's at the head of a hardware queue.
1927 * called with controller locked, irqs blocked
1928 * that hardware queue advances to the next transfer, unless prevented
1930 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1932 struct musb_hw_ep *ep = qh->hw_ep;
1933 void __iomem *epio = ep->regs;
1934 unsigned hw_end = ep->bLocalEnd;
1935 void __iomem *regs = ep->musb->pRegs;
1939 MGC_SelectEnd(regs, hw_end);
1941 if (is_dma_capable()) {
1942 struct dma_channel *dma;
1944 dma = is_in ? ep->rx_channel : ep->tx_channel;
1946 status = ep->musb->pDmaController->channel_abort(dma);
1948 "abort %cX%d DMA for urb %p --> %d\n",
1949 is_in ? 'R' : 'T', ep->bLocalEnd,
1951 urb->actual_length += dma->dwActualLength;
1955 /* turn off DMA requests, discard state, stop polling ... */
1957 /* giveback saves bulk toggle */
1958 csr = musb_h_flush_rxfifo(ep, 0);
1960 /* REVISIT we still get an irq; should likely clear the
1961 * endpoint's irq status here to avoid bogus irqs.
1962 * clearing that status is platform-specific...
1965 musb_h_tx_flush_fifo(ep);
1966 csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1967 csr &= ~( MGC_M_TXCSR_AUTOSET
1968 | MGC_M_TXCSR_DMAENAB
1969 | MGC_M_TXCSR_H_RXSTALL
1970 | MGC_M_TXCSR_H_NAKTIMEOUT
1971 | MGC_M_TXCSR_H_ERROR
1973 musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
1974 /* REVISIT may need to clear FLUSHFIFO ... */
1975 musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
1976 /* flush cpu writebuffer */
1977 csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1980 musb_advance_schedule(ep->musb, urb, ep, is_in);
1984 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1986 struct musb *musb = hcd_to_musb(hcd);
1988 struct list_head *sched;
1990 unsigned long flags;
1991 int status = -ENOENT;
1993 DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
1994 usb_pipedevice(urb->pipe),
1995 usb_pipeendpoint(urb->pipe),
1996 usb_pipein(urb->pipe) ? "in" : "out");
1998 spin_lock_irqsave(&musb->Lock, flags);
2000 /* make sure the urb is still queued and not completed */
2001 spin_lock(&urb->lock);
2004 struct usb_host_endpoint *hep;
2007 list_for_each_entry(tmp, &hep->urb_list, urb_list) {
2014 spin_unlock(&urb->lock);
2016 /* already completed */
2022 /* still queued but not found on the list */
2026 /* Any URB not actively programmed into endpoint hardware can be
2027 * immediately given back. Such an URB must be at the head of its
2028 * endpoint queue, unless someday we get real DMA queues. And even
2029 * then, it might not be known to the hardware...
2031 * Otherwise abort current transfer, pending dma, etc.; urb->status
2032 * has already been updated. This is a synchronous abort; it'd be
2033 * OK to hold off until after some IRQ, though.
2035 if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2036 status = -EINPROGRESS;
2039 case USB_ENDPOINT_XFER_CONTROL:
2040 sched = &musb->control;
2042 case USB_ENDPOINT_XFER_BULK:
2043 if (usb_pipein(urb->pipe))
2044 sched = &musb->in_bulk;
2046 sched = &musb->out_bulk;
2049 /* REVISIT when we get a schedule tree, periodic
2050 * transfers won't always be at the head of a
2051 * singleton queue...
2058 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2059 if (status < 0 || (sched && qh != first_qh(sched))) {
2060 int ready = qh->is_ready;
2064 __musb_giveback(musb, urb, 0);
2065 qh->is_ready = ready;
2067 status = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2069 spin_unlock_irqrestore(&musb->Lock, flags);
2073 /* disable an endpoint */
2075 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2077 u8 epnum = hep->desc.bEndpointAddress;
2078 unsigned long flags;
2079 struct musb *musb = hcd_to_musb(hcd);
2080 u8 is_in = epnum & USB_DIR_IN;
2081 struct musb_qh *qh = hep->hcpriv;
2082 struct urb *urb, *tmp;
2083 struct list_head *sched;
2088 spin_lock_irqsave(&musb->Lock, flags);
2091 case USB_ENDPOINT_XFER_CONTROL:
2092 sched = &musb->control;
2094 case USB_ENDPOINT_XFER_BULK:
2096 sched = &musb->in_bulk;
2098 sched = &musb->out_bulk;
2101 /* REVISIT when we get a schedule tree, periodic transfers
2102 * won't always be at the head of a singleton queue...
2108 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2110 /* kick first urb off the hardware, if needed */
2112 if (!sched || qh == first_qh(sched)) {
2115 /* make software (then hardware) stop ASAP */
2116 spin_lock(&urb->lock);
2117 if (urb->status == -EINPROGRESS)
2118 urb->status = -ESHUTDOWN;
2119 spin_unlock(&urb->lock);
2122 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2126 /* then just nuke all the others */
2127 list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
2128 musb_giveback(qh, urb, -ESHUTDOWN);
2130 spin_unlock_irqrestore(&musb->Lock, flags);
2133 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2135 struct musb *musb = hcd_to_musb(hcd);
2137 return musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);
2140 static int musb_h_start(struct usb_hcd *hcd)
2142 struct musb *musb = hcd_to_musb(hcd);
2144 /* NOTE: musb_start() is called when the hub driver turns
2145 * on port power, or when (OTG) peripheral starts.
2147 hcd->state = HC_STATE_RUNNING;
2148 musb->port1_status = 0;
2152 static void musb_h_stop(struct usb_hcd *hcd)
2154 musb_stop(hcd_to_musb(hcd));
2155 hcd->state = HC_STATE_HALT;
2158 static int musb_bus_suspend(struct usb_hcd *hcd)
2160 struct musb *musb = hcd_to_musb(hcd);
2162 if (is_host_active(musb) && musb->is_active)
2168 static int musb_bus_resume(struct usb_hcd *hcd)
2170 /* resuming child port does the work */
2174 const struct hc_driver musb_hc_driver = {
2175 .description = "musb-hcd",
2176 .product_desc = "MUSB HDRC host driver",
2177 .hcd_priv_size = sizeof (struct musb),
2178 .flags = HCD_USB2 | HCD_MEMORY,
2180 /* not using irq handler or reset hooks from usbcore, since
2181 * those must be shared with peripheral code for OTG configs
2184 .start = musb_h_start,
2185 .stop = musb_h_stop,
2187 .get_frame_number = musb_h_get_frame_number,
2189 .urb_enqueue = musb_urb_enqueue,
2190 .urb_dequeue = musb_urb_dequeue,
2191 .endpoint_disable = musb_h_disable,
2193 .hub_status_data = musb_hub_status_data,
2194 .hub_control = musb_hub_control,
2195 .bus_suspend = musb_bus_suspend,
2196 .bus_resume = musb_bus_resume,
2197 // .start_port_reset = NULL,
2198 // .hub_irq_enable = NULL,