1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
13 #include "ozprotocol.h"
20 #include <asm/unaligned.h>
21 #include <linux/uaccess.h>
22 #include <net/psnap.h>
23 /*------------------------------------------------------------------------------
25 #define OZ_MAX_TX_POOL_SIZE 6
26 /*------------------------------------------------------------------------------
/* Forward declarations for the file-local (static) helpers defined below. */
28 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
29 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
30 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
31 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
32 static int oz_send_isoc_frame(struct oz_pd *pd);
33 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
34 static void oz_isoc_stream_free(struct oz_isoc_stream *st);
35 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
36 static void oz_isoc_destructor(struct sk_buff *skb);
/* Default no-op-style app-interface callbacks used to populate g_app_if
 * slots (bodies not visible in this view). */
37 static int oz_def_app_init(void);
38 static void oz_def_app_term(void);
39 static int oz_def_app_start(struct oz_pd *pd, int resume);
40 static void oz_def_app_stop(struct oz_pd *pd, int pause);
41 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
42 /*------------------------------------------------------------------------------
43 * Counts the uncompleted isoc frames submitted to netcard.
/* Number of isoc frames handed to the netcard that have not yet been
 * released by oz_isoc_destructor(); bounded by OZ_MAX_SUBMITTED_ISOC. */
45 static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
46 /* Application handler functions.
/* Per-application dispatch table, indexed by app id; entries provide
 * init/term/start/stop/rx/heartbeat/farewell callbacks used throughout
 * this file (initializer elided in this view). */
48 static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
85 /*------------------------------------------------------------------------------
/* Default 'init' callback for g_app_if entries; body elided in this view —
 * presumably a stub returning success (TODO confirm). */
88 static int oz_def_app_init(void)
92 /*------------------------------------------------------------------------------
/* Default 'term' callback for g_app_if entries; body elided in this view. */
95 static void oz_def_app_term(void)
98 /*------------------------------------------------------------------------------
/* Default 'start' callback for g_app_if entries; body elided in this view —
 * presumably returns 0 so oz_services_start() treats it as success
 * (TODO confirm). */
101 static int oz_def_app_start(struct oz_pd *pd, int resume)
105 /*------------------------------------------------------------------------------
/* Default 'stop' callback for g_app_if entries; body elided in this view. */
108 static void oz_def_app_stop(struct oz_pd *pd, int pause)
111 /*------------------------------------------------------------------------------
/* Default 'rx' callback for g_app_if entries; body elided in this view. */
114 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
117 /*------------------------------------------------------------------------------
118 * Context: softirq or process
/* Record the PD's new state and emit a trace line naming it; the tracing
 * switch below is compiled under WANT_TRACE (see #endif). Several case
 * labels and the assignment are elided in this view. */
120 void oz_pd_set_state(struct oz_pd *pd, unsigned state)
126 oz_trace("PD State: OZ_PD_S_IDLE\n");
128 case OZ_PD_S_CONNECTED:
129 oz_trace("PD State: OZ_PD_S_CONNECTED\n");
131 case OZ_PD_S_STOPPED:
132 oz_trace("PD State: OZ_PD_S_STOPPED\n");
135 oz_trace("PD State: OZ_PD_S_SLEEP\n");
138 #endif /* WANT_TRACE */
140 /*------------------------------------------------------------------------------
141 * Context: softirq or process
/* Take a reference on the PD (paired with oz_pd_put()). */
143 void oz_pd_get(struct oz_pd *pd)
145 atomic_inc(&pd->ref_count);
147 /*------------------------------------------------------------------------------
148 * Context: softirq or process
/* Drop a reference on the PD; the last put triggers destruction —
 * presumably oz_pd_destroy() (call elided in this view, TODO confirm). */
150 void oz_pd_put(struct oz_pd *pd)
152 if (atomic_dec_and_test(&pd->ref_count))
155 /*------------------------------------------------------------------------------
156 * Context: softirq-serialized
/* Allocate and initialise a PD for the device with the given MAC address.
 * Uses GFP_ATOMIC (softirq-serialized context). Starts with ref_count = 2 —
 * presumably one reference for the caller and one held until oz_pd_stop()
 * removes the PD from the PD list (TODO confirm). Returns the new PD, or
 * (presumably) NULL on failure — return paths elided in this view. */
158 struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
160 struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
163 atomic_set(&pd->ref_count, 2);
164 for (i = 0; i < OZ_APPID_MAX; i++)
165 spin_lock_init(&pd->app_lock[i]);
/* Sentinel: no packet received yet. */
166 pd->last_rx_pkt_num = 0xffffffff;
167 oz_pd_set_state(pd, OZ_PD_S_IDLE);
168 pd->max_tx_size = OZ_MAX_TX_SIZE;
169 memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
170 if (0 != oz_elt_buf_init(&pd->elt_buff)) {
174 spin_lock_init(&pd->tx_frame_lock);
175 INIT_LIST_HEAD(&pd->tx_queue);
176 INIT_LIST_HEAD(&pd->farewell_list);
/* Empty queue: "last sent" points at the list head itself. */
177 pd->last_sent_frame = &pd->tx_queue;
178 spin_lock_init(&pd->stream_lock);
179 INIT_LIST_HEAD(&pd->stream_list);
183 /*------------------------------------------------------------------------------
184 * Context: softirq or process
/* Tear down a PD: free all isoc streams, retire every queued tx frame,
 * release the element buffer, free pending farewells, drain the tx frame
 * pool, and drop the net_device reference. No locks are taken here —
 * presumably the PD is no longer reachable by this point (TODO confirm). */
186 void oz_pd_destroy(struct oz_pd *pd)
189 struct oz_tx_frame *f;
190 struct oz_isoc_stream *st;
191 struct oz_farewell *fwell;
192 oz_trace("Destroying PD\n");
193 /* Delete any streams.
195 e = pd->stream_list.next;
196 while (e != &pd->stream_list) {
197 st = container_of(e, struct oz_isoc_stream, link);
199 oz_isoc_stream_free(st);
201 /* Free any queued tx frames.
203 e = pd->tx_queue.next;
204 while (e != &pd->tx_queue) {
205 f = container_of(e, struct oz_tx_frame, link);
/* oz_retire_frame() frees the frame's elements and returns it to the pool. */
209 oz_retire_frame(pd, f);
211 oz_elt_buf_term(&pd->elt_buff);
212 /* Free any farewells.
214 e = pd->farewell_list.next;
215 while (e != &pd->farewell_list) {
216 fwell = container_of(e, struct oz_farewell, link);
220 /* Deallocate all frames in tx pool.
222 while (pd->tx_pool) {
224 pd->tx_pool = e->next;
225 kfree(container_of(e, struct oz_tx_frame, link));
/* Balance the dev_hold taken when the PD was bound to the netdev. */
228 dev_put(pd->net_dev);
231 /*------------------------------------------------------------------------------
232 * Context: softirq-serialized
/* Start every application service whose bit is set in 'apps', calling the
 * per-app start() callback; on success the app bit is added to total_apps
 * and cleared from paused_apps under the polling lock. 'resume' is passed
 * through to the callbacks. Return value path elided in this view.
 * NOTE(review): trace message typo "Unabled" (should be "Unable"). */
234 int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
236 const struct oz_app_if *ai;
238 oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
239 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
240 if (apps & (1<<ai->app_id)) {
241 if (ai->start(pd, resume)) {
243 oz_trace("Unabled to start service %d\n",
247 oz_polling_lock_bh();
248 pd->total_apps |= (1<<ai->app_id);
250 pd->paused_apps &= ~(1<<ai->app_id);
251 oz_polling_unlock_bh();
256 /*------------------------------------------------------------------------------
257 * Context: softirq or process
/* Stop (pause != 0) or fully stop (pause == 0) every application service
 * whose bit is set in 'apps'. Pausing sets the bit in paused_apps; a full
 * stop clears the bit from both total_apps and paused_apps. Bookkeeping is
 * done under the polling lock; the per-app stop() call is elided in this
 * view. NOTE(review): trace string says "oz_stop_services" — stale name. */
259 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
261 const struct oz_app_if *ai;
262 oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
263 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
264 if (apps & (1<<ai->app_id)) {
265 oz_polling_lock_bh();
267 pd->paused_apps |= (1<<ai->app_id);
269 pd->total_apps &= ~(1<<ai->app_id);
270 pd->paused_apps &= ~(1<<ai->app_id);
272 oz_polling_unlock_bh();
277 /*------------------------------------------------------------------------------
/* Run the heartbeat callback of every app in 'apps' that provides one; if
 * any callback returns non-zero another heartbeat is requested. In
 * ISOC_ANYTIME mode, additionally pushes up to 'count' pending isoc frames
 * ('count' initialisation elided in this view). */
280 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
282 const struct oz_app_if *ai;
284 for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
285 if (ai->heartbeat && (apps & (1<<ai->app_id))) {
286 if (ai->heartbeat(pd))
291 oz_pd_request_heartbeat(pd);
292 if (pd->mode & OZ_F_ISOC_ANYTIME) {
293 while (count-- && (oz_send_isoc_frame(pd) >= 0))
298 /*------------------------------------------------------------------------------
299 * Context: softirq or process
/* Fully stop a PD: deliver pending farewells, stop all running services
 * (pause == 0), mark the PD stopped, remove it from the PD list (removal
 * line elided in this view), and delete its timers. total_apps is snapshotted
 * under the polling lock before services are stopped outside the lock. */
301 void oz_pd_stop(struct oz_pd *pd)
304 oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
305 oz_pd_indicate_farewells(pd);
306 oz_polling_lock_bh();
307 stop_apps = pd->total_apps;
310 oz_polling_unlock_bh();
311 oz_services_stop(pd, stop_apps, 0);
312 oz_polling_lock_bh();
313 oz_pd_set_state(pd, OZ_PD_S_STOPPED);
314 /* Remove from PD list.*/
316 oz_polling_unlock_bh();
317 oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
318 oz_timer_delete(pd, 0);
321 /*------------------------------------------------------------------------------
/* Put the PD to sleep. Already-sleeping/stopped PDs are left alone. With a
 * keep-alive period and an established session the PD enters OZ_PD_S_SLEEP
 * and records the wake pulse time; services are paused (pause == 1) and a
 * stop timer is armed for keep_alive_j from now. The no-keep-alive branch
 * and return values are elided in this view. */
324 int oz_pd_sleep(struct oz_pd *pd)
328 oz_polling_lock_bh();
329 if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
330 oz_polling_unlock_bh();
333 if (pd->keep_alive_j && pd->session_id) {
334 oz_pd_set_state(pd, OZ_PD_S_SLEEP);
335 pd->pulse_time_j = jiffies + pd->keep_alive_j;
336 oz_trace("Sleep Now %lu until %lu\n",
337 jiffies, pd->pulse_time_j);
341 stop_apps = pd->total_apps;
342 oz_polling_unlock_bh();
346 oz_services_stop(pd, stop_apps, 1);
347 oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
351 /*------------------------------------------------------------------------------
/* Get a tx frame, preferring the per-PD free pool (under tx_frame_lock) and
 * falling back to kmalloc(GFP_ATOMIC). The returned frame is initialised
 * with an empty element list and total_size covering just the oz header.
 * Failure path (kmalloc returning NULL) is elided in this view. */
354 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
356 struct oz_tx_frame *f = NULL;
357 spin_lock_bh(&pd->tx_frame_lock);
359 f = container_of(pd->tx_pool, struct oz_tx_frame, link);
360 pd->tx_pool = pd->tx_pool->next;
363 spin_unlock_bh(&pd->tx_frame_lock);
365 f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
367 f->total_size = sizeof(struct oz_hdr);
368 INIT_LIST_HEAD(&f->link);
369 INIT_LIST_HEAD(&f->elt_list);
373 /*------------------------------------------------------------------------------
374 * Context: softirq or process
/* Release a queued isoc frame: unlink it, decrement the queued-isoc count,
 * and return the frame to the tx pool if the pool is below
 * OZ_MAX_TX_POOL_SIZE (otherwise presumably kfree'd — that branch is elided
 * in this view). Caller is expected to hold tx_frame_lock — no locking here
 * (TODO confirm against callers). */
376 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
378 pd->nb_queued_isoc_frames--;
379 list_del_init(&f->link);
380 if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
381 f->link.next = pd->tx_pool;
382 pd->tx_pool = &f->link;
387 oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
388 pd->nb_queued_isoc_frames);
390 /*------------------------------------------------------------------------------
391 * Context: softirq or process
/* Return a tx frame to the per-PD pool under tx_frame_lock when the pool is
 * below OZ_MAX_TX_POOL_SIZE; the overflow (kfree) branch is elided in this
 * view. Counterpart of oz_tx_frame_alloc(). */
393 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
395 spin_lock_bh(&pd->tx_frame_lock);
396 if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
397 f->link.next = pd->tx_pool;
398 pd->tx_pool = &f->link;
402 spin_unlock_bh(&pd->tx_frame_lock);
405 /*------------------------------------------------------------------------------
406 * Context: softirq-serialized
/* Set the OZ_F_MORE_DATA flag in the oz header of an already-built skb to
 * tell the peer more frames are pending. */
408 static void oz_set_more_bit(struct sk_buff *skb)
410 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
411 oz_hdr->control |= OZ_F_MORE_DATA;
413 /*------------------------------------------------------------------------------
414 * Context: softirq-serialized
/* Stamp the skb's oz header with the last received packet number
 * (trigger_pkt_num, masked to the last-PN field width) for acknowledgement. */
416 static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
418 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
419 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
421 /*------------------------------------------------------------------------------
/* Build the next tx frame for a triggered-mode PD and append it to the tx
 * queue. Bails out (return values elided in this view) when the PD is not
 * in triggered mode, the queue is full, or — unless 'empty' is set — no
 * elements are available. Elements are pulled from the element buffer up to
 * max_tx_size; the frame header carries an ack request and a fresh
 * sequential packet number (stored little-endian, unaligned-safe). */
424 int oz_prepare_frame(struct oz_pd *pd, int empty)
426 struct oz_tx_frame *f;
427 if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
429 if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
431 if (!empty && !oz_are_elts_available(&pd->elt_buff))
433 f = oz_tx_frame_alloc(pd);
438 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
439 ++pd->last_tx_pkt_num;
440 put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
442 oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
443 pd->max_tx_size, &f->elt_list);
445 spin_lock(&pd->tx_frame_lock);
446 list_add_tail(&f->link, &pd->tx_queue);
447 pd->nb_queued_frames++;
448 spin_unlock(&pd->tx_frame_lock);
451 /*------------------------------------------------------------------------------
452 * Context: softirq-serialized
/* Materialise a tx frame into an sk_buff ready for dev_queue_xmit():
 * allocate the skb with lower-layer headroom, build the hard header
 * addressed to the PD's MAC, copy in the oz header (with the current
 * last-received packet number for acks) and then each queued element's
 * payload. Returns the skb; alloc/header failure paths and the final
 * return are elided in this view. */
454 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
457 struct net_device *dev = pd->net_dev;
458 struct oz_hdr *oz_hdr;
461 /* Allocate skb with enough space for the lower layers as well
462 * as the space we need.
464 skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
467 /* Reserve the head room for lower layers.
469 skb_reserve(skb, LL_RESERVED_SPACE(dev));
470 skb_reset_network_header(skb);
472 skb->protocol = htons(OZ_ETHERTYPE);
473 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
474 dev->dev_addr, skb->len) < 0)
476 /* Push the tail to the end of the area we are going to copy to.
478 oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
/* Refresh the ack field at send time, not at frame-preparation time. */
479 f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
480 memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
481 /* Copy the elements into the frame body.
483 elt = (struct oz_elt *)(oz_hdr+1);
484 for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
485 struct oz_elt_info *ei;
486 ei = container_of(e, struct oz_elt_info, link);
487 memcpy(elt, ei->data, ei->length);
488 elt = oz_next_elt(elt);
495 /*------------------------------------------------------------------------------
496 * Context: softirq or process
/* Retire a sent (or discarded) frame: run each element's completion
 * callback, return the elements to the element buffer (under its lock),
 * recycle the frame itself, and trim the element pool if it has grown past
 * its soft limit. */
498 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
501 struct oz_elt_info *ei;
502 e = f->elt_list.next;
503 while (e != &f->elt_list) {
504 ei = container_of(e, struct oz_elt_info, link);
506 list_del_init(&ei->link);
508 ei->callback(pd, ei->context);
509 spin_lock_bh(&pd->elt_buff.lock);
510 oz_elt_info_free(&pd->elt_buff, ei);
511 spin_unlock_bh(&pd->elt_buff.lock);
513 oz_tx_frame_free(pd, f);
514 if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
515 oz_trim_elt_pool(&pd->elt_buff);
517 /*------------------------------------------------------------------------------
518 * Context: softirq-serialized
/* Transmit the frame after last_sent_frame in the tx queue. Pre-built isoc
 * frames (f->skb != NULL) are sent directly — subject to the
 * g_submitted_isoc cap — after setting the more-data bit (when 'more_data')
 * and the ack field; over-cap or xmit-failed isoc frames are dropped.
 * Ordinary frames are built via oz_build_frame() and queued to the netcard.
 * Return values and several branch closers are elided in this view.
 * NOTE(review): tx_frame_lock is dropped before dev_queue_xmit in both
 * paths — presumably intentional since xmit may not be called under a
 * spinlock held for the queue (TODO confirm). */
520 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
523 struct oz_tx_frame *f;
525 spin_lock(&pd->tx_frame_lock);
526 e = pd->last_sent_frame->next;
527 if (e == &pd->tx_queue) {
528 spin_unlock(&pd->tx_frame_lock);
531 f = container_of(e, struct oz_tx_frame, link);
/* A frame with an attached skb is a pre-built isoc unit. */
533 if (f->skb != NULL) {
535 oz_tx_isoc_free(pd, f);
536 spin_unlock(&pd->tx_frame_lock);
538 oz_set_more_bit(skb);
539 oz_set_last_pkt_nb(pd, skb);
540 if ((int)atomic_read(&g_submitted_isoc) <
541 OZ_MAX_SUBMITTED_ISOC) {
542 if (dev_queue_xmit(skb) < 0) {
543 oz_trace2(OZ_TRACE_TX_FRAMES,
544 "Dropping ISOC Frame\n");
547 atomic_inc(&g_submitted_isoc);
548 oz_trace2(OZ_TRACE_TX_FRAMES,
549 "Sending ISOC Frame, nb_isoc= %d\n",
550 pd->nb_queued_isoc_frames);
554 oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
/* Normal (non-isoc) frame path. */
559 pd->last_sent_frame = e;
560 skb = oz_build_frame(pd, f);
561 spin_unlock(&pd->tx_frame_lock);
563 oz_set_more_bit(skb);
564 oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
566 if (dev_queue_xmit(skb) < 0)
572 /*------------------------------------------------------------------------------
573 * Context: softirq-serialized
/* Drain the tx path: prepare as many frames as elements allow, adjust the
 * effective backlog for the PD's isoc mode (capping at
 * OZ_MAX_SUBMITTED_ISOC for ISOC_NO_ELTS), then send queued frames until
 * sending fails; finally, at 'out', send one empty frame so the peer still
 * receives an ack. Several case bodies and the loop structure are elided in
 * this view. */
575 void oz_send_queued_frames(struct oz_pd *pd, int backlog)
577 while (oz_prepare_frame(pd, 0) >= 0)
580 switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
582 case OZ_F_ISOC_NO_ELTS: {
583 backlog += pd->nb_queued_isoc_frames;
586 if (backlog > OZ_MAX_SUBMITTED_ISOC)
587 backlog = OZ_MAX_SUBMITTED_ISOC;
590 case OZ_NO_ELTS_ANYTIME: {
591 if ((backlog <= 0) && (pd->isoc_sent == 0))
602 if (oz_send_next_queued_frame(pd, backlog) < 0)
/* Nothing left to send: emit an empty frame so the ack still goes out. */
607 out: oz_prepare_frame(pd, 1);
608 oz_send_next_queued_frame(pd, 0);
610 /*------------------------------------------------------------------------------
/* Build and transmit one isoc frame directly (used in ISOC_ANYTIME mode):
 * select isoc elements from the element buffer, allocate an skb with
 * lower-layer headroom, write the oz header (OZ_F_ISOC, current ack number)
 * and element payloads, transmit, then free the selected element chain.
 * Returns early if no elements were selected; on skb allocation failure the
 * chain is freed and an error is returned (return values elided in this
 * view). */
613 static int oz_send_isoc_frame(struct oz_pd *pd)
616 struct net_device *dev = pd->net_dev;
617 struct oz_hdr *oz_hdr;
620 struct list_head list;
621 int total_size = sizeof(struct oz_hdr);
622 INIT_LIST_HEAD(&list);
624 oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
625 pd->max_tx_size, &list);
/* Empty selection: nothing to send. */
626 if (list.next == &list)
628 skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
630 oz_trace("Cannot alloc skb\n");
631 oz_elt_info_free_chain(&pd->elt_buff, &list);
634 skb_reserve(skb, LL_RESERVED_SPACE(dev));
635 skb_reset_network_header(skb);
637 skb->protocol = htons(OZ_ETHERTYPE);
638 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
639 dev->dev_addr, skb->len) < 0) {
643 oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
644 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
645 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
646 elt = (struct oz_elt *)(oz_hdr+1);
648 for (e = list.next; e != &list; e = e->next) {
649 struct oz_elt_info *ei;
650 ei = container_of(e, struct oz_elt_info, link);
651 memcpy(elt, ei->data, ei->length);
652 elt = oz_next_elt(elt);
655 oz_elt_info_free_chain(&pd->elt_buff, &list);
658 /*------------------------------------------------------------------------------
659 * Context: softirq-serialized
/* Acknowledge transmitted frames: walk the tx queue and detach the run of
 * frames whose packet number is at or before 'lpn' (modular comparison on
 * the masked PN; a diff beyond half the PN cycle, or pkt_num == 0, stops
 * the scan). The detached [first, last] run is spliced out under
 * tx_frame_lock, last_sent_frame is reset to the queue head, and each
 * detached frame is retired outside the lock. Parts of the splice and the
 * retire loop are elided in this view. */
661 void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
664 struct oz_tx_frame *f;
665 struct list_head *first = NULL;
666 struct list_head *last = NULL;
670 spin_lock(&pd->tx_frame_lock);
671 e = pd->tx_queue.next;
672 while (e != &pd->tx_queue) {
673 f = container_of(e, struct oz_tx_frame, link);
674 pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
675 diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
676 if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
678 oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
679 pkt_num, pd->nb_queued_frames);
684 pd->nb_queued_frames--;
/* Splice the acknowledged run [first, last] out of the queue. */
687 last->next->prev = &pd->tx_queue;
688 pd->tx_queue.next = last->next;
691 pd->last_sent_frame = &pd->tx_queue;
692 spin_unlock(&pd->tx_frame_lock);
694 f = container_of(first, struct oz_tx_frame, link);
696 oz_retire_frame(pd, f);
699 /*------------------------------------------------------------------------------
700 * Precondition: stream_lock must be held.
/* Linear search of the PD's stream list for the stream bound to 'ep_num'.
 * Caller must hold stream_lock (see precondition above); the not-found
 * return is elided in this view. */
703 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
706 struct oz_isoc_stream *st;
707 list_for_each(e, &pd->stream_list) {
708 st = container_of(e, struct oz_isoc_stream, link);
709 if (st->ep_num == ep_num)
714 /*------------------------------------------------------------------------------
/* Create an isoc stream for endpoint 'ep_num' and register it on the PD,
 * unless one already exists (the duplicate/cleanup branch and allocation
 * failure check are elided in this view). ep_num assignment to the new
 * stream is also elided. */
717 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
719 struct oz_isoc_stream *st =
720 kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
724 spin_lock_bh(&pd->stream_lock);
725 if (!pd_stream_find(pd, ep_num)) {
726 list_add(&st->link, &pd->stream_list);
729 spin_unlock_bh(&pd->stream_lock);
733 /*------------------------------------------------------------------------------
734 * Context: softirq or process
/* Free an isoc stream; body elided in this view — presumably releases any
 * partially-built skb and kfrees the stream (TODO confirm). */
736 static void oz_isoc_stream_free(struct oz_isoc_stream *st)
741 /*------------------------------------------------------------------------------
/* Unlink the stream for 'ep_num' from the PD under stream_lock (unlink line
 * elided in this view) and free it outside the lock. Not-found handling and
 * return values are elided. */
744 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
746 struct oz_isoc_stream *st;
747 spin_lock_bh(&pd->stream_lock);
748 st = pd_stream_find(pd, ep_num);
751 spin_unlock_bh(&pd->stream_lock);
753 oz_isoc_stream_free(st);
756 /*------------------------------------------------------------------------------
/* skb destructor for submitted isoc frames: the netcard has consumed the
 * skb, so release one slot of the g_submitted_isoc budget. */
759 static void oz_isoc_destructor(struct sk_buff *skb)
761 atomic_dec(&g_submitted_isoc);
763 /*------------------------------------------------------------------------------
/* Append one isoc data unit for endpoint 'ep_num' to the stream's
 * accumulating skb and, once ms_per_isoc units have been gathered, finish
 * the frame (oz header + oz_isoc_large descriptor + hard header) and either
 * queue it on the tx queue (triggered mode, bounded by isoc_latency) or
 * transmit it immediately (ISOC_ANYTIME mode, bounded by the
 * g_submitted_isoc budget). Many branch closers, error paths, and return
 * values are elided in this view. */
766 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
768 struct net_device *dev = pd->net_dev;
769 struct oz_isoc_stream *st;
771 struct sk_buff *skb = NULL;
772 struct oz_hdr *oz_hdr = NULL;
774 spin_lock_bh(&pd->stream_lock);
775 st = pd_stream_find(pd, ep_num);
/* Snapshot stream state under the lock; skb pickup lines are elided. */
779 nb_units = st->nb_units;
784 spin_unlock_bh(&pd->stream_lock);
788 /* Allocate enough space for max size frame. */
789 skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
793 /* Reserve the head room for lower layers. */
794 skb_reserve(skb, LL_RESERVED_SPACE(dev));
795 skb_reset_network_header(skb);
797 skb->protocol = htons(OZ_ETHERTYPE);
798 /* For audio packet set priority to AC_VO */
800 size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
801 oz_hdr = (struct oz_hdr *)skb_put(skb, size);
/* Append this unit's payload to the accumulating frame. */
803 memcpy(skb_put(skb, len), data, len);
/* Frame not yet full: stash the skb/count back on the stream and return. */
805 if (++nb_units < pd->ms_per_isoc) {
806 spin_lock_bh(&pd->stream_lock);
808 st->nb_units = nb_units;
811 spin_unlock_bh(&pd->stream_lock);
/* Frame complete: fill in the isoc descriptor and headers. */
814 struct oz_isoc_large iso;
815 spin_lock_bh(&pd->stream_lock);
816 iso.frame_number = st->frame_num;
817 st->frame_num += nb_units;
818 spin_unlock_bh(&pd->stream_lock);
820 (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
821 oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
823 iso.endpoint = ep_num;
824 iso.format = OZ_DATA_F_ISOC_LARGE;
825 iso.ms_data = nb_units;
826 memcpy(oz_hdr, &oz, sizeof(oz));
827 memcpy(oz_hdr+1, &iso, sizeof(iso));
828 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
829 dev->dev_addr, skb->len) < 0)
/* Destructor releases the g_submitted_isoc slot after transmission. */
832 skb->destructor = oz_isoc_destructor;
833 /*Queue for Xmit if mode is not ANYTIME*/
834 if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
835 struct oz_tx_frame *isoc_unit = NULL;
836 int nb = pd->nb_queued_isoc_frames;
/* Too far behind: drop rather than grow unbounded latency. */
837 if (nb >= pd->isoc_latency) {
838 oz_trace2(OZ_TRACE_TX_FRAMES,
839 "Dropping ISOC Unit nb= %d\n",
843 isoc_unit = oz_tx_frame_alloc(pd);
844 if (isoc_unit == NULL)
847 isoc_unit->skb = skb;
848 spin_lock_bh(&pd->tx_frame_lock);
849 list_add_tail(&isoc_unit->link, &pd->tx_queue);
850 pd->nb_queued_isoc_frames++;
851 spin_unlock_bh(&pd->tx_frame_lock);
852 oz_trace2(OZ_TRACE_TX_FRAMES,
853 "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
854 pd->nb_queued_isoc_frames, pd->nb_queued_frames);
858 /*In ANYTIME mode Xmit unit immediately*/
859 if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
860 atomic_inc(&g_submitted_isoc);
861 if (dev_queue_xmit(skb) < 0)
873 /*------------------------------------------------------------------------------
/* Run the init() callback of every registered application in g_app_if
 * (the call itself is elided in this view). */
876 void oz_apps_init(void)
879 for (i = 0; i < OZ_APPID_MAX; i++)
880 if (g_app_if[i].init)
883 /*------------------------------------------------------------------------------
/* Run the term() callback of every registered application in g_app_if
 * (the call itself is elided in this view). */
886 void oz_apps_term(void)
889 /* Terminate all the apps. */
890 for (i = 0; i < OZ_APPID_MAX; i++)
891 if (g_app_if[i].term)
894 /*------------------------------------------------------------------------------
895 * Context: softirq-serialized
/* Dispatch a received element to the rx() handler of the app identified by
 * 'app_id' (1-based: index app_id-1 into g_app_if); out-of-range ids are
 * rejected. The rx() call itself is elided in this view. */
897 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
899 const struct oz_app_if *ai;
900 if (app_id == 0 || app_id > OZ_APPID_MAX)
902 ai = &g_app_if[app_id-1];
905 /*------------------------------------------------------------------------------
906 * Context: softirq or process
/* Deliver queued farewell reports to the USB app's farewell() handler. Each
 * entry is taken from farewell_list under the polling lock (the unlink line
 * is elided in this view) and delivered outside the lock; the enclosing
 * loop and entry free run past the end of this view. */
908 void oz_pd_indicate_farewells(struct oz_pd *pd)
910 struct oz_farewell *f;
911 const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
913 oz_polling_lock_bh();
914 if (list_empty(&pd->farewell_list)) {
915 oz_polling_unlock_bh();
918 f = list_first_entry(&pd->farewell_list,
919 struct oz_farewell, link);
921 oz_polling_unlock_bh();
923 ai->farewell(pd, f->ep_num, f->report, f->len);