/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
 */
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem: the worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
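/*
 * Background (illustrative, relying on documented UHCI behavior): term_td
 * is the permanently inactive TD at the very end of the schedule.  The
 * controller never executes it, but if its IOC bit is set, an interrupt
 * is raised at the end of any frame that traverses it.  Setting and
 * clearing that bit therefore gives the driver a cheap way to request
 * one interrupt per frame only when it actually has work pending.
 */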
/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 1;
	uhci->skel_term_qh->link = cpu_to_le32(
			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 0;
	uhci->skel_term_qh->link = UHCI_PTR_TERM;
}
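/*
 * How FSBR works (descriptive note): skel_term_qh is the last QH the
 * controller visits in each frame.  With FSBR on, its link pointer is
 * aimed back at skel_fs_control_qh, so instead of idling for the rest
 * of the frame the controller loops through the full-speed control and
 * bulk queues again, reclaiming otherwise unused bus time.  With FSBR
 * off the link is UHCI_PTR_TERM and traversal simply stops.
 */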
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_qh_wants_fsbr(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp =
			list_entry(qh->queue.next, struct urb_priv, node);

	if (urbp->fsbr) {
		uhci->fsbr_jiffies = jiffies;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
	}
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
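/*
 * For reference (from the UHCI design guide): a TD is four little-endian
 * 32-bit words -- link, status/control, token, buffer pointer.  The token
 * built above packs the PID into bits 0-7, the device address into bits
 * 8-14, the endpoint into bits 15-18, the data toggle into bit 19, and
 * the maximum length into bits 21-31, encoded as (length - 1), which is
 * what uhci_explen() produces.
 */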
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
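/*
 * Illustration (not normative): after two iso TDs have been inserted
 * in frame N, the hardware path is
 *
 *	frame[N] --> TD1 --> TD2 --> (whatever frame[N] held before)
 *
 * New TDs always join the end of the chain, keeping the original frame
 * contents (the skeleton QHs) reachable; the wmb() guarantees each TD
 * is fully written out before the controller can follow a link to it.
 */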
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

	uhci->frame[framenum] = ltd->link;
	uhci->frame_cpu[framenum] = NULL;

	while (!list_empty(&ftd->fl_list))
		list_del_init(ftd->fl_list.prev);
}
/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	wmb();
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
	}
	return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}
/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB.  The next
		 * expected toggle comes from the URB's last TD. */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
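/*
 * Worked example (illustrative): suppose a bulk queue holds URB A with
 * three TDs (toggles 0,1,0) followed by URB B (toggles 1,0,1), and A
 * ends early on its second TD.  The controller last used toggle 1, so
 * qh->initial_toggle is 0 and the scan starts at B.  B's first TD
 * carries toggle 1, which is now wrong, so every TD in B gets its
 * TD_TOKEN_TOGGLE bit flipped and usb_settoggle() records 1 as the
 * next toggle for the endpoint.
 */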
/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}
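/*
 * Note on the linking order (descriptive): within a skeleton's list the
 * QHs form a singly linked horizontal chain in the hardware.  The new QH
 * first inherits the old tail's link pointer, and only after the wmb()
 * is the old tail aimed at the new QH, so the controller can never
 * follow a half-built link.
 */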
/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	memset(urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_add_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
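/*
 * Illustration (not normative): an 18-byte control read with an 8-byte
 * maximum packet size ends up as the chain
 *
 *	SETUP (DATA0) -> IN 8 (DATA1) -> IN 8 (DATA0) -> IN 2 (DATA1)
 *		-> status OUT 0 (DATA1, IOC) -> inactive dummy TD
 *
 * The old dummy TD becomes the SETUP TD and is activated only after the
 * rest of the chain is in place, which is what makes the whole transfer
 * appear to the controller atomically.
 */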
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;
	qh->period = urb->interval;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int exponent;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	/* Figure out which power-of-two queue to use */
	for (exponent = 7; exponent >= 0; --exponent) {
		if ((1 << exponent) <= urb->interval)
			break;
	}
	if (exponent < 0)
		return -EINVAL;
	urb->interval = 1 << exponent;

	if (qh->period == 0)
		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
	else if (qh->period != urb->interval)
		return -EINVAL;		/* Can't change the period */

	return uhci_submit_common(uhci, urb, qh);
}
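/*
 * Example (illustrative): an interrupt URB submitted with interval 7
 * selects exponent 2, so it joins the 4-ms skeleton queue and
 * urb->interval is rounded down to 4.  Once a QH has acquired a period
 * this way, later URBs queued to it must use the same interval.
 */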
/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}
/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(uhci_dev(uhci),
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (qh->period == 0) {
		if (urb->transfer_flags & URB_ISO_ASAP) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = uhci->frame_number + 10;
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
		}
	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {	/* Pick up where the last URB leaves off */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP)
			urb->start_frame = frame;
		else if (urb->start_frame != frame)
			return -EINVAL;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;
	qh->period = urb->interval;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
		qh->iso_status = 0;
	}

	return 0;
}
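/*
 * Example (illustrative): a 10-packet URB with interval 2 starting at
 * frame F occupies frames F, F+2, ..., F+18.  The checks above ensure
 * the whole span lies inside the 1024-entry window beginning at
 * uhci->last_iso_frame, since frame numbers wrap modulo UHCI_NUMFRAMES.
 */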
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}

		if (status) {
			urb->error_count++;
			qh->iso_status = status;
		}

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return qh->iso_status;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_qh_wants_fsbr(uhci, qh);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	qh = urbp->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
		qh->iso_status = 0;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}
/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
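/*
 * How the test works (descriptive): a QH is fully unlinked only after at
 * least one frame boundary has passed since qh->unlink_frame, because
 * the controller may still hold a cached pointer to the QH during the
 * frame in which it was unlinked.  While the controller is stopped,
 * uhci->is_stopped is nonzero, forcing the inequality true so that
 * unlinks can finish immediately.
 */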
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned status;
	int ret = 1;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (!qh->wait_expired && time_after(jiffies,
			qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) ==
				cpu_to_le32(qh->post_td->dma_handle)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);
	}
done:
	return ret;
}
/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh, regs);
				if (qh->state == QH_STATE_ACTIVE)
					uhci_qh_wants_fsbr(uhci, qh);
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && time_after(jiffies,
			uhci->fsbr_jiffies + FSBR_OFF_DELAY))
		uhci_fsbr_off(uhci);

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}