Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[firefly-linux-kernel-4.4.55.git] / drivers / staging / ozwpan / ozpd.c
1 /* -----------------------------------------------------------------------------
2  * Copyright (c) 2011 Ozmo Inc
3  * Released under the GNU General Public License Version 2 (GPLv2).
4  * -----------------------------------------------------------------------------
5  */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include "ozconfig.h"
13 #include "ozprotocol.h"
14 #include "ozeltbuf.h"
15 #include "ozpd.h"
16 #include "ozproto.h"
17 #include "oztrace.h"
18 #include "ozcdev.h"
19 #include "ozusbsvc.h"
20 #include <asm/unaligned.h>
21 #include <linux/uaccess.h>
22 #include <net/psnap.h>
23 /*------------------------------------------------------------------------------
24  */
25 #define OZ_MAX_TX_POOL_SIZE     6
26 /*------------------------------------------------------------------------------
27  */
28 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
29 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
30 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
31 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
32 static int oz_send_isoc_frame(struct oz_pd *pd);
33 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
34 static void oz_isoc_stream_free(struct oz_isoc_stream *st);
35 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
36 static void oz_isoc_destructor(struct sk_buff *skb);
37 static int oz_def_app_init(void);
38 static void oz_def_app_term(void);
39 static int oz_def_app_start(struct oz_pd *pd, int resume);
40 static void oz_def_app_stop(struct oz_pd *pd, int pause);
41 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
42 /*------------------------------------------------------------------------------
43  * Counts the uncompleted isoc frames submitted to netcard.
44  */
45 static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
46 /* Application handler functions.
47  */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	/* Each entry supplies, in order: init, term, start, stop, rx,
	 * heartbeat, farewell handlers and the application id.
	 * (Field order inferred from the oz_def_app_* prototypes above;
	 * confirm against struct oz_app_if in ozpd.h.)
	 */

	/* USB service: full handler set including heartbeat/farewell. */
	{oz_usb_init,
	oz_usb_term,
	oz_usb_start,
	oz_usb_stop,
	oz_usb_rx,
	oz_usb_heartbeat,
	oz_usb_farewell,
	OZ_APPID_USB},

	/* Unused slot 1: default no-op handlers. */
	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED1},

	/* Unused slot 2: default no-op handlers. */
	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED2},

	/* Serial (character device) service: no heartbeat/farewell. */
	{oz_cdev_init,
	oz_cdev_term,
	oz_cdev_start,
	oz_cdev_stop,
	oz_cdev_rx,
	NULL,
	NULL,
	OZ_APPID_SERIAL},
};
85 /*------------------------------------------------------------------------------
86  * Context: process
87  */
static int oz_def_app_init(void)
{
	/* Placeholder init for unused application slots; always succeeds. */
	return 0;
}
92 /*------------------------------------------------------------------------------
93  * Context: process
94  */
static void oz_def_app_term(void)
{
	/* Placeholder terminate hook for unused application slots. */
}
98 /*------------------------------------------------------------------------------
99  * Context: softirq
100  */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	/* Placeholder start hook; ignores its arguments and reports success. */
	(void)pd;
	(void)resume;
	return 0;
}
105 /*------------------------------------------------------------------------------
106  * Context: softirq
107  */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
	/* Placeholder stop hook; nothing to do for unused slots. */
	(void)pd;
	(void)pause;
}
111 /*------------------------------------------------------------------------------
112  * Context: softirq
113  */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
	/* Placeholder receive hook; unused slots discard incoming elements. */
	(void)pd;
	(void)elt;
}
117 /*------------------------------------------------------------------------------
118  * Context: softirq or process
119  */
/* Record the PD's new lifecycle state and, when tracing is compiled in,
 * emit a human-readable trace line for it.
 * NOTE(review): the store to pd->state is not under any lock here; callers
 * appear to serialize via oz_polling_lock_bh() (see oz_pd_stop/oz_pd_sleep)
 * — confirm all call sites hold it.
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
#ifdef WANT_TRACE
	/* Trace-only: map the state bit to its symbolic name. */
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_trace("PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_trace("PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_trace("PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_trace("PD State: OZ_PD_S_SLEEP\n");
		break;
	}
#endif /* WANT_TRACE */
}
140 /*------------------------------------------------------------------------------
141  * Context: softirq or process
142  */
/* Take a reference on the PD; paired with oz_pd_put(). */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}
147 /*------------------------------------------------------------------------------
148  * Context: softirq or process
149  */
/* Drop a reference on the PD; the last put destroys it.
 * Callers must not touch pd after this returns.
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
155 /*------------------------------------------------------------------------------
156  * Context: softirq-serialized
157  */
158 struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
159 {
160         struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
161         if (pd) {
162                 int i;
163                 atomic_set(&pd->ref_count, 2);
164                 for (i = 0; i < OZ_APPID_MAX; i++)
165                         spin_lock_init(&pd->app_lock[i]);
166                 pd->last_rx_pkt_num = 0xffffffff;
167                 oz_pd_set_state(pd, OZ_PD_S_IDLE);
168                 pd->max_tx_size = OZ_MAX_TX_SIZE;
169                 memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
170                 if (0 != oz_elt_buf_init(&pd->elt_buff)) {
171                         kfree(pd);
172                         pd = NULL;
173                 }
174                 spin_lock_init(&pd->tx_frame_lock);
175                 INIT_LIST_HEAD(&pd->tx_queue);
176                 INIT_LIST_HEAD(&pd->farewell_list);
177                 pd->last_sent_frame = &pd->tx_queue;
178                 spin_lock_init(&pd->stream_lock);
179                 INIT_LIST_HEAD(&pd->stream_list);
180         }
181         return pd;
182 }
183 /*------------------------------------------------------------------------------
184  * Context: softirq or process
185  */
/* Free a PD and every resource hanging off it. Called from oz_pd_put()
 * when the last reference is dropped, so no other context can still be
 * using the PD (no locking needed here).
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	oz_trace("Destroying PD\n");
	/* Delete any streams.
	 * (Advance e before freeing the node it points into.)
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames, including the skb of ISOC frames
	 * that were queued but never transmitted.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool.
	 * The pool is a singly linked list threaded through link.next.
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	/* Release the reference taken on the net device when it was bound. */
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}
231 /*------------------------------------------------------------------------------
232  * Context: softirq-serialized
233  */
234 int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
235 {
236         const struct oz_app_if *ai;
237         int rc = 0;
238         oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
239         for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
240                 if (apps & (1<<ai->app_id)) {
241                         if (ai->start(pd, resume)) {
242                                 rc = -1;
243                                 oz_trace("Unabled to start service %d\n",
244                                         ai->app_id);
245                                 break;
246                         }
247                         oz_polling_lock_bh();
248                         pd->total_apps |= (1<<ai->app_id);
249                         if (resume)
250                                 pd->paused_apps &= ~(1<<ai->app_id);
251                         oz_polling_unlock_bh();
252                 }
253         }
254         return rc;
255 }
256 /*------------------------------------------------------------------------------
257  * Context: softirq or process
258  */
/* Stop (or pause) every application service whose bit is set in apps.
 * The bookkeeping bitmaps are updated under the polling lock before the
 * service's stop handler is invoked with the pause flag.
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;
	oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			oz_polling_lock_bh();
			if (pause) {
				/* Pausing: remember the app so it can resume. */
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				/* Full stop: forget the app entirely. */
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			oz_polling_unlock_bh();
			ai->stop(pd, pause);
		}
	}
}
277 /*------------------------------------------------------------------------------
278  * Context: softirq
279  */
/* Deliver a heartbeat tick to each service in apps that registered a
 * heartbeat handler. If any handler reports more pending work, another
 * heartbeat is requested. In ISOC-anytime mode, also push up to 8 queued
 * isochronous frames out.
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if (more)
		oz_pd_request_heartbeat(pd);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		/* Bounded burst so one PD cannot monopolize the softirq. */
		int count = 8;
		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}
298 /*------------------------------------------------------------------------------
299  * Context: softirq or process
300  */
/* Fully stop a PD: flush farewells, stop all running services, mark the
 * PD stopped, unlink it from the global PD list, cancel its timers, and
 * drop the list's reference (which may destroy the PD).
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps = 0;
	oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	/* Snapshot and clear the app bitmaps atomically w.r.t. polling. */
	oz_polling_lock_bh();
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	/* Stop handlers may sleep-free resources; call outside the lock. */
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list.*/
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_timer_delete(pd, 0);
	/* Drop the PD-list reference; pd may be freed after this call. */
	oz_pd_put(pd);
}
321 /*------------------------------------------------------------------------------
322  * Context: softirq
323  */
/* Put the PD to sleep if it has a keep-alive interval and a session;
 * otherwise stop it outright. Returns nonzero when the PD was stopped,
 * 0 when it went to sleep (or was already asleep/stopped).
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps = 0;
	oz_polling_lock_bh();
	/* Already asleep or stopped: nothing to do. */
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive_j && pd->session_id) {
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
		/* Deadline by which the peer must pulse again. */
		pd->pulse_time_j = jiffies + pd->keep_alive_j;
		oz_trace("Sleep Now %lu until %lu\n",
			jiffies, pd->pulse_time_j);
	} else {
		do_stop = 1;
	}
	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		/* Pause services and arm a stop timer for the keep-alive
		 * window; services resume if the peer comes back in time. */
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
	}
	return do_stop;
}
351 /*------------------------------------------------------------------------------
352  * Context: softirq
353  */
/* Get a tx frame structure, preferring the per-PD free pool over kmalloc.
 * Returns NULL only if the pool is empty and the atomic allocation fails.
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		/* Pop the head of the singly linked pool list. */
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		/* Fresh frame starts with just the protocol header counted. */
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}
373 /*------------------------------------------------------------------------------
374  * Context: softirq or process
375  */
/* Release an ISOC tx frame back to the pool (or kfree it when the pool
 * is full) and decrement the queued-ISOC counter.
 * Caller holds pd->tx_frame_lock (see oz_send_next_queued_frame), which
 * is why, unlike oz_tx_frame_free(), no locking is done here.
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		/* Push onto the singly linked pool list. */
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
						pd->nb_queued_isoc_frames);
}
390 /*------------------------------------------------------------------------------
391  * Context: softirq or process
392  */
/* Return a tx frame to the per-PD pool, or free it when the pool is
 * already at OZ_MAX_TX_POOL_SIZE entries.
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		/* Pooled: clear f so the kfree below becomes a no-op. */
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}
405 /*------------------------------------------------------------------------------
406  * Context: softirq-serialized
407  */
408 static void oz_set_more_bit(struct sk_buff *skb)
409 {
410         struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
411         oz_hdr->control |= OZ_F_MORE_DATA;
412 }
413 /*------------------------------------------------------------------------------
414  * Context: softirq-serialized
415  */
416 static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
417 {
418         struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
419         oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
420 }
421 /*------------------------------------------------------------------------------
422  * Context: softirq
423  */
/* Build a tx frame structure (header plus selected elements) and append
 * it to the PD's tx queue. When empty is nonzero an element-less frame is
 * produced (used as a bare acknowledgement carrier).
 * Returns 0 on success, -1 if the PD is not in triggered mode, the queue
 * is full, no elements are available, or allocation fails.
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;
	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	/* Assign the next packet number; stored unaligned/little-endian. */
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		/* Move as many buffered elements as fit into this frame. */
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
451 /*------------------------------------------------------------------------------
452  * Context: softirq-serialized
453  */
/* Materialize a queued tx frame into an sk_buff ready for transmission:
 * link-layer header, oz header (with the current trigger packet number),
 * then the frame's elements copied in sequence.
 * Returns the skb, or NULL on allocation/header failure. The frame f
 * stays queued; its elements are not consumed here.
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	/* Refresh the acked-packet field at send time, not queue time. */
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}
495 /*------------------------------------------------------------------------------
496  * Context: softirq or process
497  */
/* Retire a tx frame: run each element's completion callback, return the
 * element buffers and the frame itself to their pools, and trim the
 * element pool if it has grown past its limit.
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;
	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		/* Advance before unlinking/freeing the current node. */
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}
517 /*------------------------------------------------------------------------------
518  * Context: softirq-serialized
519  */
520 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
521 {
522         struct sk_buff *skb;
523         struct oz_tx_frame *f;
524         struct list_head *e;
525         spin_lock(&pd->tx_frame_lock);
526         e = pd->last_sent_frame->next;
527         if (e == &pd->tx_queue) {
528                 spin_unlock(&pd->tx_frame_lock);
529                 return -1;
530         }
531         f = container_of(e, struct oz_tx_frame, link);
532
533         if (f->skb != NULL) {
534                 skb = f->skb;
535                 oz_tx_isoc_free(pd, f);
536                 spin_unlock(&pd->tx_frame_lock);
537                 if (more_data)
538                         oz_set_more_bit(skb);
539                 oz_set_last_pkt_nb(pd, skb);
540                 if ((int)atomic_read(&g_submitted_isoc) <
541                                                         OZ_MAX_SUBMITTED_ISOC) {
542                         if (dev_queue_xmit(skb) < 0) {
543                                 oz_trace2(OZ_TRACE_TX_FRAMES,
544                                                 "Dropping ISOC Frame\n");
545                                 return -1;
546                         }
547                         atomic_inc(&g_submitted_isoc);
548                         oz_trace2(OZ_TRACE_TX_FRAMES,
549                                         "Sending ISOC Frame, nb_isoc= %d\n",
550                                                 pd->nb_queued_isoc_frames);
551                         return 0;
552                 } else {
553                         kfree_skb(skb);
554                         oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
555                         return -1;
556                 }
557         }
558
559         pd->last_sent_frame = e;
560         skb = oz_build_frame(pd, f);
561         spin_unlock(&pd->tx_frame_lock);
562         if (more_data)
563                 oz_set_more_bit(skb);
564         oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
565         if (skb) {
566                 if (dev_queue_xmit(skb) < 0)
567                         return -1;
568
569         }
570         return 0;
571 }
572 /*------------------------------------------------------------------------------
573  * Context: softirq-serialized
574  */
/* Prepare as many frames as the element buffer allows, then send up to
 * backlog of them, with mode-specific adjustments for ISOC traffic. If
 * there is nothing to send, an empty (ack-only) frame is emitted instead.
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	/* Each successfully prepared frame adds to the send budget. */
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

		case OZ_F_ISOC_NO_ELTS: {
			/* Include queued ISOC frames, capped at the
			 * in-flight ISOC limit. */
			backlog += pd->nb_queued_isoc_frames;
			if (backlog <= 0)
				goto out;
			if (backlog > OZ_MAX_SUBMITTED_ISOC)
				backlog = OZ_MAX_SUBMITTED_ISOC;
			break;
		}
		case OZ_NO_ELTS_ANYTIME: {
			/* NOTE(review): presumably the combination of both
			 * mode bits — confirm against ozprotocol.h. */
			if ((backlog <= 0) && (pd->isoc_sent == 0))
				goto out;
			break;
		}
		default: {
			if (backlog <= 0)
				goto out;
			break;
		}
	}
	/* The second argument tells the sender whether more data follows. */
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}
610 /*------------------------------------------------------------------------------
611  * Context: softirq
612  */
/* Build and immediately transmit one isochronous frame from the buffered
 * ISOC elements. Returns 0 when there was nothing to send or the frame
 * went out, -1 on allocation or header failure.
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);
	INIT_LIST_HEAD(&list);

	/* Pull ISOC elements (second arg = 1) up to the max frame size. */
	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_trace("Cannot alloc skb\n");
		/* Return the selected elements so they are not leaked. */
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		/* NOTE(review): the selected elements are not freed on this
		 * path, unlike the alloc_skb failure above — possible leak. */
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	/* Copy each element's payload into the frame body in order. */
	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
658 /*------------------------------------------------------------------------------
659  * Context: softirq-serialized
660  */
/* Retire all tx frames acknowledged by the peer's last packet number lpn.
 * Frames whose (masked) packet number is at or before lpn — judged by
 * modular distance within half the number cycle — are unlinked from the
 * queue in one splice under the lock, then retired outside it.
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		/* Stop at the first unacknowledged frame (distance past the
		 * half cycle) or a frame with packet number 0. */
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
						 pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	/* Detach [first..last] from the queue; last->next is NULLed so the
	 * retire loop below knows where the detached run ends. */
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	/* Run callbacks and free frames without holding the lock. */
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}
699 /*------------------------------------------------------------------------------
700  * Precondition: stream_lock must be held.
701  * Context: softirq
702  */
703 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
704 {
705         struct list_head *e;
706         struct oz_isoc_stream *st;
707         list_for_each(e, &pd->stream_list) {
708                 st = container_of(e, struct oz_isoc_stream, link);
709                 if (st->ep_num == ep_num)
710                         return st;
711         }
712         return NULL;
713 }
714 /*------------------------------------------------------------------------------
715  * Context: softirq
716  */
/* Create an ISOC stream for endpoint ep_num on this PD.
 * Returns 0 on success or if a stream for that endpoint already exists
 * (the freshly allocated one is quietly discarded), -ENOMEM on allocation
 * failure.
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		/* Ownership transferred to the list; don't free below. */
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	/* kfree(NULL) is a no-op, so this only frees the duplicate. */
	kfree(st);
	return 0;
}
733 /*------------------------------------------------------------------------------
734  * Context: softirq or process
735  */
/* Free an ISOC stream, including any partially accumulated skb. */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}
741 /*------------------------------------------------------------------------------
742  * Context: softirq
743  */
/* Remove and free the ISOC stream for endpoint ep_num, if one exists.
 * Always returns 0. The free happens outside the stream lock.
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;
	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}
756 /*------------------------------------------------------------------------------
757  * Context: any
758  */
/* skb destructor for submitted ISOC frames: releases one slot of the
 * global in-flight counter incremented in oz_send_next_queued_frame().
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
763 /*------------------------------------------------------------------------------
764  * Context: softirq
765  */
766 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
767 {
768         struct net_device *dev = pd->net_dev;
769         struct oz_isoc_stream *st;
770         u8 nb_units = 0;
771         struct sk_buff *skb = NULL;
772         struct oz_hdr *oz_hdr = NULL;
773         int size = 0;
774         spin_lock_bh(&pd->stream_lock);
775         st = pd_stream_find(pd, ep_num);
776         if (st) {
777                 skb = st->skb;
778                 st->skb = NULL;
779                 nb_units = st->nb_units;
780                 st->nb_units = 0;
781                 oz_hdr = st->oz_hdr;
782                 size = st->size;
783         }
784         spin_unlock_bh(&pd->stream_lock);
785         if (!st)
786                 return 0;
787         if (!skb) {
788                 /* Allocate enough space for max size frame. */
789                 skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
790                                 GFP_ATOMIC);
791                 if (skb == NULL)
792                         return 0;
793                 /* Reserve the head room for lower layers. */
794                 skb_reserve(skb, LL_RESERVED_SPACE(dev));
795                 skb_reset_network_header(skb);
796                 skb->dev = dev;
797                 skb->protocol = htons(OZ_ETHERTYPE);
798                 /* For audio packet set priority to AC_VO */
799                 skb->priority = 0x7;
800                 size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
801                 oz_hdr = (struct oz_hdr *)skb_put(skb, size);
802         }
803         memcpy(skb_put(skb, len), data, len);
804         size += len;
805         if (++nb_units < pd->ms_per_isoc) {
806                 spin_lock_bh(&pd->stream_lock);
807                 st->skb = skb;
808                 st->nb_units = nb_units;
809                 st->oz_hdr = oz_hdr;
810                 st->size = size;
811                 spin_unlock_bh(&pd->stream_lock);
812         } else {
813                 struct oz_hdr oz;
814                 struct oz_isoc_large iso;
815                 spin_lock_bh(&pd->stream_lock);
816                 iso.frame_number = st->frame_num;
817                 st->frame_num += nb_units;
818                 spin_unlock_bh(&pd->stream_lock);
819                 oz.control =
820                         (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
821                 oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
822                 oz.pkt_num = 0;
823                 iso.endpoint = ep_num;
824                 iso.format = OZ_DATA_F_ISOC_LARGE;
825                 iso.ms_data = nb_units;
826                 memcpy(oz_hdr, &oz, sizeof(oz));
827                 memcpy(oz_hdr+1, &iso, sizeof(iso));
828                 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
829                                 dev->dev_addr, skb->len) < 0)
830                         goto out;
831
832                 skb->destructor = oz_isoc_destructor;
833                 /*Queue for Xmit if mode is not ANYTIME*/
834                 if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
835                         struct oz_tx_frame *isoc_unit = NULL;
836                         int nb = pd->nb_queued_isoc_frames;
837                         if (nb >= pd->isoc_latency) {
838                                 oz_trace2(OZ_TRACE_TX_FRAMES,
839                                                 "Dropping ISOC Unit nb= %d\n",
840                                                                         nb);
841                                 goto out;
842                         }
843                         isoc_unit = oz_tx_frame_alloc(pd);
844                         if (isoc_unit == NULL)
845                                 goto out;
846                         isoc_unit->hdr = oz;
847                         isoc_unit->skb = skb;
848                         spin_lock_bh(&pd->tx_frame_lock);
849                         list_add_tail(&isoc_unit->link, &pd->tx_queue);
850                         pd->nb_queued_isoc_frames++;
851                         spin_unlock_bh(&pd->tx_frame_lock);
852                         oz_trace2(OZ_TRACE_TX_FRAMES,
853                         "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
854                         pd->nb_queued_isoc_frames, pd->nb_queued_frames);
855                         return 0;
856                 }
857
858                 /*In ANYTIME mode Xmit unit immediately*/
859                 if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
860                         atomic_inc(&g_submitted_isoc);
861                         if (dev_queue_xmit(skb) < 0)
862                                 return -1;
863                         else
864                                 return 0;
865                 }
866
867 out:    kfree_skb(skb);
868         return -1;
869
870         }
871         return 0;
872 }
873 /*------------------------------------------------------------------------------
874  * Context: process
875  */
876 void oz_apps_init(void)
877 {
878         int i;
879         for (i = 0; i < OZ_APPID_MAX; i++)
880                 if (g_app_if[i].init)
881                         g_app_if[i].init();
882 }
883 /*------------------------------------------------------------------------------
884  * Context: process
885  */
886 void oz_apps_term(void)
887 {
888         int i;
889         /* Terminate all the apps. */
890         for (i = 0; i < OZ_APPID_MAX; i++)
891                 if (g_app_if[i].term)
892                         g_app_if[i].term();
893 }
894 /*------------------------------------------------------------------------------
895  * Context: softirq-serialized
896  */
897 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
898 {
899         const struct oz_app_if *ai;
900         if (app_id == 0 || app_id > OZ_APPID_MAX)
901                 return;
902         ai = &g_app_if[app_id-1];
903         ai->rx(pd, elt);
904 }
905 /*------------------------------------------------------------------------------
906  * Context: softirq or process
907  */
908 void oz_pd_indicate_farewells(struct oz_pd *pd)
909 {
910         struct oz_farewell *f;
911         const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
912         while (1) {
913                 oz_polling_lock_bh();
914                 if (list_empty(&pd->farewell_list)) {
915                         oz_polling_unlock_bh();
916                         break;
917                 }
918                 f = list_first_entry(&pd->farewell_list,
919                                 struct oz_farewell, link);
920                 list_del(&f->link);
921                 oz_polling_unlock_bh();
922                 if (ai->farewell)
923                         ai->farewell(pd, f->ep_num, f->report, f->len);
924                 kfree(f);
925         }
926 }