/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
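
/*
 * Example (illustrative, not from the original source): when this code
 * is built into a gadget module such as g_ether, the two link-level
 * addresses can be pinned at load time instead of randomized, e.g.
 *
 *	modprobe g_ether dev_addr=aa:bb:cc:dd:ee:01 host_addr=aa:bb:cc:dd:ee:02
 *
 * dev_addr names the gadget ("usb0") end, host_addr the end the USB
 * host sees; both strings parse via get_ether_addr() below.
 */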

#define UETH__VERSION	"29-May-2008"

static struct workqueue_struct	*uether_wq;

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	unsigned		ul_max_pkts_per_xfer;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;
	struct work_struct	rx_work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
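
/*
 * Worked example (illustrative, not from the original source): with the
 * default qmult of 5, a link enumerated at high or super speed gets
 * qmult * DEFAULT_QLEN = 10 requests per direction, while a full-speed
 * link keeps the plain double-buffered depth of 2.  Since qmult is
 * declared S_IWUSR above, it can also be tuned at runtime through
 * /sys/module/<gadget module>/parameters/qmult.
 */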

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
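
	/* Worked example (illustrative, assuming ECM framing at high
	 * speed: MTU 1500, header_len 0, maxpacket 512, and
	 * ul_max_pkts_per_xfer unset):
	 *	size  = 14 (ethhdr) + 1500 (mtu) + 20 (RX_EXTRA) = 1534
	 *	size += 512 - 1			-> 2045
	 *	size -= 2045 % 512 (= 509)	-> 1536
	 * i.e. each buffer holds a maximal frame rounded up to a whole
	 * number of bulk packets (3 * 512), never a partial USB packet.
	 */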

	DBG(dev, "%s: size: %zu\n", __func__, size);
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;
	int			req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission*/
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;
	int		status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
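
	/* Illustration (not from the original source): with bulk
	 * maxpacket 512 and a 1024-byte frame, the host only sees the
	 * transfer end if we either send a zero-length packet
	 * (req->zero) or, on hardware that can't write zlps (!dev->zlp),
	 * pad by one byte so the final USB packet is short.  Either way
	 * the receiver's "terminate on short read" rule fires.
	 */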

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
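
/*
 * Example (illustrative): get_ether_addr("01:23:45:67:89:ab", buf)
 * fills buf with { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } and returns 0;
 * "01.23.45.67.89.ab" parses identically since '.' and ':' are both
 * skipped.  A NULL, malformed, multicast, or all-zero string falls back
 * to eth_random_addr() and returns 1, so callers can warn about the
 * random fallback.
 */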

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_WORK(&dev->rx_work, process_rx_w);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
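
/*
 * Usage sketch (hypothetical caller, illustration only; hostaddr and
 * example_bind are invented names): a composite gadget driver would
 * typically create the link once at bind time.
 */
#if 0
static u8 hostaddr[ETH_ALEN];

static int example_bind(struct usb_composite_dev *cdev)
{
	struct eth_dev *the_dev;

	the_dev = gether_setup_name(cdev->gadget, hostaddr, "usb");
	if (IS_ERR(the_dev))
		return PTR_ERR(the_dev);

	/* ... add configurations; call gether_cleanup(the_dev) on unbind */
	return 0;
}
#endif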

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;
		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);

	return dev->net;
}
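
/*
 * Usage sketch (hypothetical function driver, illustration only; fn and
 * its fields are invented names): a function's set_alt() would activate
 * the data plane like this, after choosing endpoint descriptors that
 * match the current connection speed.
 */
#if 0
	struct net_device *net;

	fn->port.is_zlp_ok = true;	/* hardware can send zlps */
	net = gether_connect(&fn->port);
	if (IS_ERR(net))
		return PTR_ERR(net);	/* endpoints already disabled */
#endif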

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;
	struct sk_buff		*skb;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);

	spin_lock(&dev->rx_frames.lock);
	while ((skb = __skb_dequeue(&dev->rx_frames)))
		dev_kfree_skb_any(skb);
	spin_unlock(&dev->rx_frames.lock);

	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}

static int __init gether_init(void)
{
	uether_wq = create_singlethread_workqueue("uether");
	if (!uether_wq) {
		pr_err("%s: Unable to create workqueue: uether\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
module_init(gether_init);

static void __exit gether_exit(void)
{
	destroy_workqueue(uether_wq);
}
module_exit(gether_exit);

MODULE_DESCRIPTION("ethernet over USB driver");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL v2");