/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"
/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet, "CDC Subset", or RNDIS.
 * That includes all descriptor and endpoint management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
#define DRIVER_VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct sk_buff *skb);
	int			(*unwrap)(struct sk_buff *skb);

	struct work_struct	work;
	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};
/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */
#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult	1
#endif
/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
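
/* For example, with the default qmult of 5, a dual-speed controller
 * running at high speed queues 5 * DEFAULT_QLEN = 10 requests per
 * direction, while a full-speed link keeps the default of 2.
 */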
/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define WARN(dev, fmt, args...) \
	xprintk(dev , KERN_WARNING , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)
/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static int eth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}
static u32 eth_get_link(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	return dev->gadget->speed != USB_SPEED_UNKNOWN;
}
/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = eth_get_link
};
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
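
	/* Worked example: a 1500 byte MTU plus the 14 byte ethhdr and
	 * RX_EXTRA = 20 gives 1534 bytes (with no function-specific
	 * header); rounding up to N*maxpacket on a 512 byte high speed
	 * bulk endpoint yields a 1536 byte (three packet) buffer.
	 */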

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);
		if (dev->unwrap)
			status = dev->unwrap(skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx(skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
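
/* cdc_filter holds the bitmap set by the host's most recent
 * SET_ETHERNET_PACKET_FILTER request; for example, a host that enables
 * promiscuous mode sets USB_CDC_PACKET_TYPE_PROMISCUOUS, and then
 * eth_start_xmit() below skips the outgoing filter checks entirely.
 */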
static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for the extra headers we need
	 */
	if (dev->wrap) {
		struct sk_buff	*skb_new;

		skb_new = dev->wrap(skb);
		if (!skb_new)
			goto drop;

		dev_kfree_skb_any(skb);
		skb = skb_new;
		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * And some hardware doesn't like to write zlps.
	 */
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;
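
	/* For instance, a 512 byte frame on a 512 byte high speed bulk
	 * endpoint would otherwise end exactly on a packet boundary;
	 * sending 513 bytes forces a short final packet, so the host
	 * sees end-of-transfer without an explicit zero length packet.
	 */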

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
drop:
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep, link->in);
			usb_ep_enable(link->out_ep, link->out);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
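
/* A typical invocation might look like (hypothetical addresses):
 *
 *	modprobe g_ether dev_addr=aa:bb:cc:dd:ee:f1 host_addr=aa:bb:cc:dd:ee:f2
 *
 * Omitting either parameter makes that end of the link use a random
 * address, logged at setup time.
 */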
static u8 __init nibble(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	c = toupper(c);
	if (isxdigit(c))
		return 10 + c - 'A';
	return 0;
}

static int __init get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = nibble(*str++) << 4;
			num |= (nibble(*str++));
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}
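
/* Accepts "aa:bb:cc:dd:ee:ff" style strings (with ':' or '.' as the
 * separator); anything that doesn't parse to a valid unicast address
 * falls back to a random one, and the nonzero return tells the caller
 * to log that fallback.
 */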
static struct eth_dev *the_dev;
/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->change_mtu = eth_change_mtu;
	net->hard_start_xmit = eth_start_xmit;
	net->open = eth_open;
	net->stop = eth_stop;
	/* watchdog_timeo, tx_timeout ... */
	/* set_multicast_list */
	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_stop_queue(net);
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		DECLARE_MAC_BUF(tmp);

		INFO(dev, "MAC %s\n", print_mac(tmp, net->dev_addr));
		INFO(dev, "HOST MAC %s\n", print_mac(tmp, dev->host_mac));

		the_dev = dev;
	}

	return status;
}
/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	free_netdev(the_dev->net);

	/* assuming we used keventd, it must quiesce too */
	flush_scheduled_work();

	the_dev = NULL;
}
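
/* Typical call sequence from a gadget driver, as an illustrative sketch
 * (hook names follow the usual composite gadget conventions):
 *
 *	gether_setup(gadget, hostaddr);		// at bind() time
 *	net = gether_connect(&link);		// set_alt(): data path up
 *	gether_disconnect(&link);		// disable(): data path down
 *	gether_cleanup();			// at unbind() time
 */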
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}