1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
 */
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/errno.h>
14 #include "ozprotocol.h"
20 #include <asm/unaligned.h>
21 #include <linux/uaccess.h>
22 #include <net/psnap.h>
/* Forward declarations for the TX-frame and isoc-stream helpers that are
 * defined later in this file. */
24 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
25 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
26 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
27 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
28 static int oz_send_isoc_frame(struct oz_pd *pd);
29 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
30 static void oz_isoc_stream_free(struct oz_isoc_stream *st);
31 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
32 static void oz_isoc_destructor(struct sk_buff *skb);
35 * Counts the uncompleted isoc frames submitted to netcard.
/* Incremented when an isoc skb is handed to the netdev for transmit and
 * decremented by oz_isoc_destructor() when the skb is released. */
37 static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
39 /* Application handler functions.
/* Per-application callback table indexed by app id. USB and character
 * device (cdev) entries are visible here; the remaining initializer
 * fields are truncated in this view. */
41 static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
45 .start = oz_usb_start,
48 .heartbeat = oz_usb_heartbeat,
49 .farewell = oz_usb_farewell,
54 .start = oz_cdev_start,
62 * Context: softirq or process
/* Record the new PD (peer device) state and log the transition; only the
 * debug-log arms of the state switch are visible in this truncated view. */
64 void oz_pd_set_state(struct oz_pd *pd, unsigned state)
69 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
71 case OZ_PD_S_CONNECTED:
72 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
75 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
78 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
84 * Context: softirq or process
/* Take a reference on the PD (bumps pd->ref_count). */
86 void oz_pd_get(struct oz_pd *pd)
88 atomic_inc(&pd->ref_count);
92 * Context: softirq or process
/* Drop a reference on the PD; the branch taken when the count reaches
 * zero (final teardown) is truncated from this view. */
94 void oz_pd_put(struct oz_pd *pd)
96 if (atomic_dec_and_test(&pd->ref_count))
101 * Context: softirq-serialized
/* Allocate and initialise a PD object: ref_count starts at 2, per-app
 * locks, element buffer, TX queue, stream list, and the heartbeat and
 * timeout tasklets/hrtimers are all set up here. */
103 struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
105 struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
110 atomic_set(&pd->ref_count, 2);
111 for (i = 0; i < OZ_NB_APPS; i++)
112 spin_lock_init(&pd->app_lock[i]);
/* Sentinel: no packet received yet. */
113 pd->last_rx_pkt_num = 0xffffffff;
114 oz_pd_set_state(pd, OZ_PD_S_IDLE);
115 pd->max_tx_size = OZ_MAX_TX_SIZE;
116 ether_addr_copy(pd->mac_addr, mac_addr);
117 oz_elt_buf_init(&pd->elt_buff);
118 spin_lock_init(&pd->tx_frame_lock);
119 INIT_LIST_HEAD(&pd->tx_queue);
120 INIT_LIST_HEAD(&pd->farewell_list);
/* Empty queue: "last sent frame" points at the list-head sentinel. */
121 pd->last_sent_frame = &pd->tx_queue;
122 spin_lock_init(&pd->stream_lock);
123 INIT_LIST_HEAD(&pd->stream_list);
124 tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
126 tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
128 hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
129 hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
130 pd->heartbeat.function = oz_pd_heartbeat_event;
131 pd->timeout.function = oz_pd_timeout_event;
137 * Context: softirq or process
/* Workqueue handler that tears down a PD: kills the timer tasklets, then
 * frees queued isoc streams, TX frames and farewell records, and drops
 * the net_device reference. */
139 static void oz_pd_free(struct work_struct *work)
141 struct list_head *e, *n;
/* NOTE(review): pd is passed to oz_pd_dbg() below BEFORE being
 * initialised by container_of() on the following line — use of an
 * uninitialised pointer; the container_of() assignment should come
 * first. */
144 oz_pd_dbg(pd, ON, "Destroying PD\n");
145 pd = container_of(work, struct oz_pd, workitem);
146 /*Disable timer tasklets*/
147 tasklet_kill(&pd->heartbeat_tasklet);
148 tasklet_kill(&pd->timeout_tasklet);
150 /* Free streams, queued tx frames and farewells. */
152 list_for_each_safe(e, n, &pd->stream_list)
153 oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));
155 list_for_each_safe(e, n, &pd->tx_queue) {
156 struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);
160 oz_retire_frame(pd, f);
163 oz_elt_buf_term(&pd->elt_buff);
165 list_for_each_safe(e, n, &pd->farewell_list)
166 kfree(list_entry(e, struct oz_farewell, link));
169 dev_put(pd->net_dev);
174 * Context: softirq or Process
/* Cancel the PD's hrtimers and defer the actual free to process context
 * by scheduling oz_pd_free() on the system workqueue. */
176 void oz_pd_destroy(struct oz_pd *pd)
178 if (hrtimer_active(&pd->timeout))
179 hrtimer_cancel(&pd->timeout);
180 if (hrtimer_active(&pd->heartbeat))
181 hrtimer_cancel(&pd->heartbeat);
183 INIT_WORK(&pd->workitem, oz_pd_free);
/* schedule_work() returns false if the work item was already queued. */
184 if (!schedule_work(&pd->workitem))
185 oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
189 * Context: softirq-serialized
/* Start (or resume) every application whose bit is set in `apps`,
 * recording successes in pd->total_apps and clearing paused_apps bits
 * under g_polling_lock. */
191 int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
195 oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
196 for (i = 0; i < OZ_NB_APPS; i++) {
197 if (g_app_if[i].start && (apps & (1 << i))) {
/* start() returning non-zero signals failure for app i. */
198 if (g_app_if[i].start(pd, resume)) {
201 "Unable to start service %d\n", i);
204 spin_lock_bh(&g_polling_lock);
205 pd->total_apps |= (1 << i);
207 pd->paused_apps &= ~(1 << i);
208 spin_unlock_bh(&g_polling_lock);
215 * Context: softirq or process
/* Stop (or pause) every application whose bit is set in `apps`,
 * updating total_apps/paused_apps bookkeeping under g_polling_lock
 * before invoking each app's stop() callback. */
217 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
221 oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
222 for (i = 0; i < OZ_NB_APPS; i++) {
223 if (g_app_if[i].stop && (apps & (1 << i))) {
224 spin_lock_bh(&g_polling_lock);
226 pd->paused_apps |= (1 << i);
228 pd->total_apps &= ~(1 << i);
229 pd->paused_apps &= ~(1 << i);
231 spin_unlock_bh(&g_polling_lock);
232 g_app_if[i].stop(pd, pause);
/* Run the heartbeat callback of each selected app; when no app wants
 * more heartbeats, cancel the heartbeat hrtimer. In ISOC_ANYTIME mode
 * also push out pending isoc frames. */
240 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
244 for (i = 0; i < OZ_NB_APPS; i++) {
245 if (g_app_if[i].heartbeat && (apps & (1 << i))) {
246 if (g_app_if[i].heartbeat(pd))
250 if ((!more) && (hrtimer_active(&pd->heartbeat)))
251 hrtimer_cancel(&pd->heartbeat);
252 if (pd->mode & OZ_F_ISOC_ANYTIME) {
/* Keep sending until the count is exhausted or a send fails. */
255 while (count-- && (oz_send_isoc_frame(pd) >= 0))
261 * Context: softirq or process
/* Stop the PD: deliver pending farewells, stop all running services,
 * mark the PD OZ_PD_S_STOPPED and remove it from the PD list. */
263 void oz_pd_stop(struct oz_pd *pd)
267 oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
268 oz_pd_indicate_farewells(pd);
269 spin_lock_bh(&g_polling_lock);
270 stop_apps = pd->total_apps;
273 spin_unlock_bh(&g_polling_lock);
/* pause=0: a full stop, not a suspend. */
274 oz_services_stop(pd, stop_apps, 0);
275 spin_lock_bh(&g_polling_lock);
276 oz_pd_set_state(pd, OZ_PD_S_STOPPED);
277 /* Remove from PD list.*/
279 spin_unlock_bh(&g_polling_lock);
280 oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
/* Put the PD to sleep: pause its services and arm the OZ_TIMER_STOP
 * timer with the keep_alive interval. Returns early if the PD is
 * already asleep or stopped. */
287 int oz_pd_sleep(struct oz_pd *pd)
292 spin_lock_bh(&g_polling_lock);
293 if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
294 spin_unlock_bh(&g_polling_lock);
/* Only a PD with a keep-alive and an established session can sleep. */
297 if (pd->keep_alive && pd->session_id)
298 oz_pd_set_state(pd, OZ_PD_S_SLEEP);
302 stop_apps = pd->total_apps;
303 spin_unlock_bh(&g_polling_lock);
/* pause=1: services are suspended, not terminated. */
307 oz_services_stop(pd, stop_apps, 1);
308 oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
/* Allocate a TX frame from the slab cache and initialise its size
 * (header only, no elements yet) and list heads. */
316 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
318 struct oz_tx_frame *f;
320 f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
322 f->total_size = sizeof(struct oz_hdr);
323 INIT_LIST_HEAD(&f->link);
324 INIT_LIST_HEAD(&f->elt_list);
330 * Context: softirq or process
/* Unlink a queued isoc frame, return it to the slab cache and update the
 * queued-isoc counter. Callers in this file invoke it while holding
 * pd->tx_frame_lock. */
332 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
334 pd->nb_queued_isoc_frames--;
335 list_del_init(&f->link);
337 kmem_cache_free(oz_tx_frame_cache, f);
339 oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
340 pd->nb_queued_isoc_frames);
344 * Context: softirq or process
/* Return a TX frame to the slab cache. */
346 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
348 kmem_cache_free(oz_tx_frame_cache, f);
352 * Context: softirq-serialized
/* Set the MORE_DATA flag in the Ozmo header of an already-built skb. */
354 static void oz_set_more_bit(struct sk_buff *skb)
356 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
358 oz_hdr->control |= OZ_F_MORE_DATA;
362 * Context: softirq-serialized
/* Stamp the skb's Ozmo header with the PD's trigger packet number
 * (masked to the last-PN field width). */
364 static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
366 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
368 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
/* Build the next data frame for a triggered-mode PD: allocate a frame,
 * assign the next TX packet number (little-endian, unaligned-safe), pull
 * elements from the element buffer (skipped when `empty`) and append the
 * frame to the TX queue under tx_frame_lock. */
374 int oz_prepare_frame(struct oz_pd *pd, int empty)
376 struct oz_tx_frame *f;
/* Only triggered mode queues frames through this path. */
378 if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
380 if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
382 if (!empty && !oz_are_elts_available(&pd->elt_buff))
384 f = oz_tx_frame_alloc(pd)
389 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
390 ++pd->last_tx_pkt_num;
391 put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
393 oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
394 pd->max_tx_size, &f->elt_list);
396 spin_lock(&pd->tx_frame_lock);
397 list_add_tail(&f->link, &pd->tx_queue);
398 pd->nb_queued_frames++;
399 spin_unlock(&pd->tx_frame_lock);
404 * Context: softirq-serialized
/* Materialise a queued TX frame into an skb: reserve link-layer head
 * room, add the hardware header, then copy the Ozmo header and each
 * queued element into the frame body. */
406 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
409 struct net_device *dev = pd->net_dev;
410 struct oz_hdr *oz_hdr;
412 struct oz_elt_info *ei;
414 /* Allocate skb with enough space for the lower layers as well
415 * as the space we need.
417 skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
420 /* Reserve the head room for lower layers.
422 skb_reserve(skb, LL_RESERVED_SPACE(dev));
423 skb_reset_network_header(skb);
425 skb->protocol = htons(OZ_ETHERTYPE);
426 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
427 dev->dev_addr, skb->len) < 0)
429 /* Push the tail to the end of the area we are going to copy to.
431 oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
/* Refresh last_pkt_num at build time: acknowledge latest trigger. */
432 f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
433 memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
434 /* Copy the elements into the frame body.
436 elt = (struct oz_elt *)(oz_hdr+1);
437 list_for_each_entry(ei, &f->elt_list, link) {
438 memcpy(elt, ei->data, ei->length);
439 elt = oz_next_elt(elt);
448 * Context: softirq or process
/* Release a completed TX frame: run each element's completion callback,
 * return the element infos to the element buffer (under its lock), then
 * free the frame itself. */
450 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
452 struct oz_elt_info *ei, *n;
454 list_for_each_entry_safe(ei, n, &f->elt_list, link) {
455 list_del_init(&ei->link);
457 ei->callback(pd, ei->context);
458 spin_lock_bh(&pd->elt_buff.lock);
459 oz_elt_info_free(&pd->elt_buff, ei);
460 spin_unlock_bh(&pd->elt_buff.lock);
462 oz_tx_frame_free(pd, f);
466 * Context: softirq-serialized
/* Transmit the frame after pd->last_sent_frame. A queued isoc unit
 * (f->skb already built) is sent directly — throttled by the global
 * g_submitted_isoc count — while a regular frame is materialised with
 * oz_build_frame() and handed to the netdev. `more_data` causes the
 * MORE_DATA bit to be set in the outgoing header. */
468 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
471 struct oz_tx_frame *f;
474 spin_lock(&pd->tx_frame_lock);
475 e = pd->last_sent_frame->next;
/* Nothing beyond the last sent frame: queue exhausted. */
476 if (e == &pd->tx_queue) {
477 spin_unlock(&pd->tx_frame_lock);
480 f = list_entry(e, struct oz_tx_frame, link);
/* Non-NULL skb marks a pre-built isoc unit. */
482 if (f->skb != NULL) {
484 oz_tx_isoc_free(pd, f);
485 spin_unlock(&pd->tx_frame_lock);
487 oz_set_more_bit(skb);
488 oz_set_last_pkt_nb(pd, skb);
489 if ((int)atomic_read(&g_submitted_isoc) <
490 OZ_MAX_SUBMITTED_ISOC) {
491 if (dev_queue_xmit(skb) < 0) {
492 oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
495 atomic_inc(&g_submitted_isoc);
496 oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
497 pd->nb_queued_isoc_frames);
/* Too many isoc frames in flight: drop this one. */
501 oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
505 pd->last_sent_frame = e;
506 skb = oz_build_frame(pd, f);
507 spin_unlock(&pd->tx_frame_lock);
511 oz_set_more_bit(skb);
512 oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
513 if (dev_queue_xmit(skb) < 0)
520 * Context: softirq-serialized
/* Drain the element buffer into frames and send as many queued frames
 * as the PD mode and `backlog` allow; the fallback path at `out` sends
 * an empty frame so the acknowledgement still goes out. */
522 void oz_send_queued_frames(struct oz_pd *pd, int backlog)
524 while (oz_prepare_frame(pd, 0) >= 0)
527 switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
529 case OZ_F_ISOC_NO_ELTS: {
530 backlog += pd->nb_queued_isoc_frames;
/* Cap the backlog at the isoc in-flight limit. */
533 if (backlog > OZ_MAX_SUBMITTED_ISOC)
534 backlog = OZ_MAX_SUBMITTED_ISOC;
537 case OZ_NO_ELTS_ANYTIME: {
538 if ((backlog <= 0) && (pd->isoc_sent == 0))
549 if (oz_send_next_queued_frame(pd, backlog) < 0)
/* Nothing sent: emit an empty frame to carry the ack. */
554 out: oz_prepare_frame(pd, 1);
555 oz_send_next_queued_frame(pd, 0);
/* Build and transmit one isoc frame from the element buffer; selected
 * elements are always returned to the buffer afterwards, including on
 * the skb-allocation failure path. */
561 static int oz_send_isoc_frame(struct oz_pd *pd)
564 struct net_device *dev = pd->net_dev;
565 struct oz_hdr *oz_hdr;
567 struct oz_elt_info *ei;
569 int total_size = sizeof(struct oz_hdr);
/* Second arg 1 = select isoc elements. */
571 oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
572 pd->max_tx_size, &list);
573 if (list_empty(&list))
575 skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
577 oz_dbg(ON, "Cannot alloc skb\n");
578 oz_elt_info_free_chain(&pd->elt_buff, &list);
581 skb_reserve(skb, LL_RESERVED_SPACE(dev));
582 skb_reset_network_header(skb);
584 skb->protocol = htons(OZ_ETHERTYPE);
585 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
586 dev->dev_addr, skb->len) < 0) {
590 oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
591 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
592 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
593 elt = (struct oz_elt *)(oz_hdr+1);
595 list_for_each_entry(ei, &list, link) {
596 memcpy(elt, ei->data, ei->length);
597 elt = oz_next_elt(elt);
600 oz_elt_info_free_chain(&pd->elt_buff, &list);
605 * Context: softirq-serialized
/* Acknowledge frames up to last packet number `lpn`: walk the TX queue
 * under tx_frame_lock, cut the acknowledged prefix onto a local list,
 * then retire each frame (running completion callbacks) with the lock
 * dropped. */
607 void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
609 struct oz_tx_frame *f, *tmp = NULL;
615 spin_lock(&pd->tx_frame_lock);
616 list_for_each_entry(f, &pd->tx_queue, link) {
617 pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
/* Wrap-safe distance in last-PN space; stop at the first frame
 * beyond the ack window (or a zero packet number). */
618 diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
619 if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
621 oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
622 pkt_num, pd->nb_queued_frames);
624 pd->nb_queued_frames--;
627 list_cut_position(&list, &pd->tx_queue, &tmp->link);
628 pd->last_sent_frame = &pd->tx_queue;
629 spin_unlock(&pd->tx_frame_lock);
631 list_for_each_entry_safe(f, tmp, &list, link)
632 oz_retire_frame(pd, f);
636 * Precondition: stream_lock must be held.
/* Look up the isoc stream for endpoint `ep_num` in the PD's stream
 * list; the not-found return path is truncated in this view. */
639 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
641 struct oz_isoc_stream *st;
643 list_for_each_entry(st, &pd->stream_list, link) {
644 if (st->ep_num == ep_num)
/* Create an isoc stream for endpoint `ep_num` and add it to the PD's
 * stream list under stream_lock, unless one already exists. */
653 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
655 struct oz_isoc_stream *st =
656 kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
660 spin_lock_bh(&pd->stream_lock);
661 if (!pd_stream_find(pd, ep_num)) {
662 list_add(&st->link, &pd->stream_list);
665 spin_unlock_bh(&pd->stream_lock);
671 * Context: softirq or process
/* Free an isoc stream object; the body is truncated in this view. */
673 static void oz_isoc_stream_free(struct oz_isoc_stream *st)
/* Remove the stream for endpoint `ep_num` from the PD under
 * stream_lock, then free it with the lock dropped. */
682 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
684 struct oz_isoc_stream *st;
686 spin_lock_bh(&pd->stream_lock);
687 st = pd_stream_find(pd, ep_num);
690 spin_unlock_bh(&pd->stream_lock);
692 oz_isoc_stream_free(st);
/* skb destructor for submitted isoc frames: balances the
 * atomic_inc() of g_submitted_isoc performed at transmit time. */
699 static void oz_isoc_destructor(struct sk_buff *skb)
701 atomic_dec(&g_submitted_isoc);
/* Accumulate isoc audio units for endpoint `ep_num` into a per-stream
 * skb. Once ms_per_isoc units are collected the Ozmo + isoc-large
 * headers are completed and the frame is either queued on the TX queue
 * (non-ANYTIME mode, dropping the oldest queued isoc frame when the
 * isoc_latency bound is exceeded) or transmitted immediately (ANYTIME
 * mode, throttled by g_submitted_isoc). */
707 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
709 struct net_device *dev = pd->net_dev;
710 struct oz_isoc_stream *st;
712 struct sk_buff *skb = NULL;
713 struct oz_hdr *oz_hdr = NULL;
716 spin_lock_bh(&pd->stream_lock);
717 st = pd_stream_find(pd, ep_num);
721 nb_units = st->nb_units;
726 spin_unlock_bh(&pd->stream_lock);
730 /* Allocate enough space for max size frame. */
731 skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
735 /* Reserve the head room for lower layers. */
736 skb_reserve(skb, LL_RESERVED_SPACE(dev));
737 skb_reset_network_header(skb);
739 skb->protocol = htons(OZ_ETHERTYPE);
740 /* For audio packet set priority to AC_VO */
742 size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
743 oz_hdr = (struct oz_hdr *)skb_put(skb, size);
745 memcpy(skb_put(skb, len), data, len);
/* Not enough units collected yet: stash the partially-filled skb back
 * in the stream and return. */
747 if (++nb_units < pd->ms_per_isoc) {
748 spin_lock_bh(&pd->stream_lock);
750 st->nb_units = nb_units;
753 spin_unlock_bh(&pd->stream_lock);
756 struct oz_isoc_large iso;
758 spin_lock_bh(&pd->stream_lock);
759 iso.frame_number = st->frame_num;
760 st->frame_num += nb_units;
761 spin_unlock_bh(&pd->stream_lock);
763 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
764 oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
766 iso.endpoint = ep_num;
767 iso.format = OZ_DATA_F_ISOC_LARGE;
768 iso.ms_data = nb_units;
769 memcpy(oz_hdr, &oz, sizeof(oz));
770 memcpy(oz_hdr+1, &iso, sizeof(iso));
771 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
772 dev->dev_addr, skb->len) < 0)
775 skb->destructor = oz_isoc_destructor;
776 /*Queue for Xmit if mode is not ANYTIME*/
777 if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
778 struct oz_tx_frame *isoc_unit = NULL;
779 int nb = pd->nb_queued_isoc_frames;
/* Queue already at the latency bound: drop the oldest queued isoc
 * frame to make room for this one. */
781 if (nb >= pd->isoc_latency) {
782 struct oz_tx_frame *f;
784 oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
786 spin_lock(&pd->tx_frame_lock);
787 list_for_each_entry(f, &pd->tx_queue, link) {
788 if (f->skb != NULL) {
789 oz_tx_isoc_free(pd, f);
793 spin_unlock(&pd->tx_frame_lock);
795 isoc_unit = oz_tx_frame_alloc(pd);
796 if (isoc_unit == NULL)
799 isoc_unit->skb = skb;
800 spin_lock_bh(&pd->tx_frame_lock);
801 list_add_tail(&isoc_unit->link, &pd->tx_queue);
802 pd->nb_queued_isoc_frames++;
803 spin_unlock_bh(&pd->tx_frame_lock);
805 "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
806 pd->nb_queued_isoc_frames, pd->nb_queued_frames);
810 /*In ANYTIME mode Xmit unit immediately*/
811 if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
812 atomic_inc(&g_submitted_isoc);
813 if (dev_queue_xmit(skb) < 0)
/* Call each registered application's init handler. */
828 void oz_apps_init(void)
832 for (i = 0; i < OZ_NB_APPS; i++) {
833 if (g_app_if[i].init)
/* Call each registered application's term handler. */
841 void oz_apps_term(void)
845 /* Terminate all the apps. */
846 for (i = 0; i < OZ_NB_APPS; i++) {
847 if (g_app_if[i].term)
853 * Context: softirq-serialized
/* Dispatch a received element to the owning application's rx handler;
 * out-of-range app ids and apps without an rx handler are ignored. */
855 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
857 if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
858 g_app_if[app_id].rx(pd, elt);
862 * Context: softirq or process
864 void oz_pd_indicate_farewells(struct oz_pd *pd)
866 struct oz_farewell *f;
867 const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];
870 spin_lock_bh(&g_polling_lock);
871 if (list_empty(&pd->farewell_list)) {
872 spin_unlock_bh(&g_polling_lock);
875 f = list_first_entry(&pd->farewell_list,
876 struct oz_farewell, link);
878 spin_unlock_bh(&g_polling_lock);
880 ai->farewell(pd, f->ep_num, f->report, f->len);