1 /*
2  * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
3  *
4  * Flow rings are entities related to transmit traffic, i.e. traffic propagating towards the antenna
5  *
6  *
7  * Copyright (C) 1999-2016, Broadcom Corporation
8  * 
9  *      Unless you and Broadcom execute a separate written software license
10  * agreement governing use of this software, this software is licensed to you
11  * under the terms of the GNU General Public License version 2 (the "GPL"),
12  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13  * following added to such license:
14  * 
15  *      As a special exception, the copyright holders of this software give you
16  * permission to link this software with independent modules, and to copy and
17  * distribute the resulting executable under terms of your choice, provided that
18  * you also meet, for each linked independent module, the terms and conditions of
19  * the license of that module.  An independent module is a module which is not
20  * derived from this software.  The special exception does not apply to any
21  * modifications of the software.
22  * 
23  *      Notwithstanding the above, under no circumstances may you combine this
24  * software in any way with any other Broadcom software provided under a license
25  * other than the GPL, without Broadcom's express prior written consent.
26  *
27  *
28  * <<Broadcom-WL-IPTag/Open:>>
29  *
30  * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $
31  */
32
33
34 #include <typedefs.h>
35 #include <bcmutils.h>
36 #include <bcmendian.h>
37 #include <bcmdevs.h>
38
39 #include <proto/ethernet.h>
40 #include <proto/bcmevent.h>
41 #include <dngl_stats.h>
42
43 #include <dhd.h>
44
45 #include <dhd_flowring.h>
46 #include <dhd_bus.h>
47 #include <dhd_proto.h>
48 #include <dhd_dbg.h>
49 #include <proto/802.1d.h>
50 #include <pcie_core.h>
51 #include <bcmmsgbuf.h>
52 #include <dhd_pcie.h>
53
54
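/*
 * Overview of the data structures managed in this file:
 *  - dhdp->flow_ring_table: an array of flow_ring_node_t indexed directly by
 *    flowid; each node owns a software 'backup' flow_queue_t and its own spinlock.
 *  - dhdp->if_flow_lkup: a per-interface hash table (if_flow_lkup_t) mapping
 *    {da, prio} (or just prio on STA interfaces) to a flowid.
 *  - dhdp->flowid_allocator: a 16-bit id allocator (id16_map) handing out flowids.
 * dhdp->flowid_lock protects the lookup and allocator structures, while each
 * flow_ring_node's own lock protects its status and backup queue. Packets in a
 * backup queue are chained through the packet link field (PKTLINK), forming a
 * singly linked FIFO.
 */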
55 static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
56
57 static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
58                                      uint8 prio, char *sa, char *da);
59
60 static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
61                                       uint8 prio, char *sa, char *da);
62
63 static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
64                                 uint8 prio, char *sa, char *da, uint16 *flowid);
65 int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
66
67 #define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
68 #define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))
69
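/*
 * prio2ac maps 802.1D priorities (0..7) to WMM access categories
 * (0 = BE, 1 = BK, 2 = VI, 3 = VO); prio2tid is the identity mapping onto TIDs.
 * With DHD_LOSSLESS_ROAMING, priority 7 maps to the otherwise unused value 7,
 * presumably so that such frames (e.g. 802.1X during a roam) are steered to a
 * dedicated ring rather than the regular VO ring.
 */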
70 #ifdef DHD_LOSSLESS_ROAMING
71 const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
72 #else
73 const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
74 #endif
75 const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
76
77 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
78 static INLINE int
79 dhd_flow_queue_throttle(flow_queue_t *queue)
80 {
81         return DHD_FLOW_QUEUE_FULL(queue);
82 }
83
84 int BCMFASTPATH
85 dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
86 {
87         return BCME_NORESOURCE;
88 }
89
90 /** Returns flow ring given a flowid */
91 flow_ring_node_t *
92 dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
93 {
94         flow_ring_node_t * flow_ring_node;
95
96         ASSERT(dhdp != (dhd_pub_t*)NULL);
97         ASSERT(flowid < dhdp->num_flow_rings);
98
99         flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
100
101         ASSERT(flow_ring_node->flowid == flowid);
102         return flow_ring_node;
103 }
104
105 /** Returns 'backup' queue given a flowid */
106 flow_queue_t *
107 dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
108 {
109         flow_ring_node_t * flow_ring_node;
110
111         flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
112         return &flow_ring_node->queue;
113 }
114
115 /* Flow ring's queue management functions */
116
117 /** Initialize a flow ring's queue, called on driver initialization. */
118 void
119 dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
120 {
121         ASSERT((queue != NULL) && (max > 0));
122
123         dll_init(&queue->list);
124         queue->head = queue->tail = NULL;
125         queue->len = 0;
126
127         /* Set queue's threshold and queue's parent cumulative length counter */
128         ASSERT(max > 1);
129         DHD_FLOW_QUEUE_SET_MAX(queue, max);
130         DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
131         DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
132
133         queue->failures = 0U;
134         queue->cb = &dhd_flow_queue_overflow;
135 }
136
137 /** Register an enqueue overflow callback handler */
138 void
139 dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
140 {
141         ASSERT(queue != NULL);
142         queue->cb = cb;
143 }
144
145 /**
146  * Enqueue an 802.3 packet at the back of a flow ring's backup queue. From there it is later
147  * forwarded to the flow ring itself.
148  */
149 int BCMFASTPATH
150 dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
151 {
152         int ret = BCME_OK;
153
154         ASSERT(queue != NULL);
155
156         if (dhd_flow_queue_throttle(queue)) {
157                 queue->failures++;
158                 ret = (*queue->cb)(queue, pkt);
159                 goto done;
160         }
161
162         if (queue->head) {
163                 FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
164         } else {
165                 queue->head = pkt;
166         }
167
168         FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
169
170         queue->tail = pkt; /* at tail */
171
172         queue->len++;
173         /* increment parent's cumulative length */
174         DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
175
176 done:
177         return ret;
178 }
179
180 /** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
181 void * BCMFASTPATH
182 dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
183 {
184         void * pkt;
185
186         ASSERT(queue != NULL);
187
188         pkt = queue->head; /* from head */
189
190         if (pkt == NULL) {
191                 ASSERT((queue->len == 0) && (queue->tail == NULL));
192                 goto done;
193         }
194
195         queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
196         if (queue->head == NULL)
197                 queue->tail = NULL;
198
199         queue->len--;
200         /* decrement parent's cumulative length */
201         DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
202
203         FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
204
205 done:
206         return pkt;
207 }
208
209 /** Reinsert a dequeued 802.3 packet back at the head */
210 void BCMFASTPATH
211 dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
212 {
213         if (queue->head == NULL) {
214                 queue->tail = pkt;
215         }
216
217         FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
218         queue->head = pkt;
219         queue->len++;
220         /* increment parent's cumulative length */
221         DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
222 }
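
/*
 * Hypothetical usage sketch of the backup-queue API from a transmit path (the
 * actual callers live in the bus layer, e.g. dhd_pcie.c); the per-ring lock is
 * held around queue operations:
 *
 *   DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
 *   ret = dhd_flow_queue_enqueue(dhdp, &flow_ring_node->queue, pkt);
 *   DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 *   if (ret != BCME_OK)
 *           PKTFREE(dhdp->osh, pkt, TRUE);
 *
 * On failure the queue was full and the registered overflow callback has
 * already run, so the caller simply frees (or otherwise disposes of) the packet.
 */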
223
224 /** Fetch the backup queue for a flowring, and assign flow control thresholds */
225 void
226 dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
227                      int queue_budget, int cumm_threshold, void *cumm_ctr)
228 {
229         flow_queue_t * queue;
230
231         ASSERT(dhdp != (dhd_pub_t*)NULL);
232         ASSERT(queue_budget > 1);
233         ASSERT(cumm_threshold > 1);
234         ASSERT(cumm_ctr != (void*)NULL);
235
236         queue = dhd_flow_queue(dhdp, flowid);
237
238         DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
239
240         /* Set the queue's parent threshold and cumulative counter */
241         DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
242         DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
243 }
244
245 /** Initializes data structures of multiple flow rings */
246 int
247 dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
248 {
249         uint32 idx;
250         uint32 flow_ring_table_sz;
251         uint32 if_flow_lkup_sz = 0;
252         void * flowid_allocator;
253         flow_ring_table_t *flow_ring_table = NULL;
254         if_flow_lkup_t *if_flow_lkup = NULL;
255         void *lock = NULL;
256         void *list_lock = NULL;
257         unsigned long flags;
258
259         DHD_INFO(("%s\n", __FUNCTION__));
260
261         /* Construct a 16-bit flowid allocator */
262         flowid_allocator = id16_map_init(dhdp->osh,
263                                num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
264         if (flowid_allocator == NULL) {
265                 DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
266                 return BCME_NOMEM;
267         }
268
269         /* Allocate a flow ring table comprising the requested number of rings */
270         flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
271         flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
272         if (flow_ring_table == NULL) {
273                 DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
274                 goto fail;
275         }
276
277         /* Initialize flow ring table state */
278         DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
279         bzero((uchar *)flow_ring_table, flow_ring_table_sz);
280         for (idx = 0; idx < num_flow_rings; idx++) {
281                 flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
282                 flow_ring_table[idx].flowid = (uint16)idx;
283                 flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
284                 if (flow_ring_table[idx].lock == NULL) {
285                         DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
286                         goto fail;
287                 }
288
289                 dll_init(&flow_ring_table[idx].list);
290
291                 /* Initialize the per flow ring backup queue */
292                 dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
293                                     FLOW_RING_QUEUE_THRESHOLD);
294         }
295
296         /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
297         if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
298         if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
299                 DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
300         if (if_flow_lkup == NULL) {
301                 DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
302                 goto fail;
303         }
304
305         /* Initialize per interface hash table */
306         for (idx = 0; idx < DHD_MAX_IFS; idx++) {
307                 int hash_ix;
308                 if_flow_lkup[idx].status = 0;
309                 if_flow_lkup[idx].role = 0;
310                 for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
311                         if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
312         }
313
314         lock = dhd_os_spin_lock_init(dhdp->osh);
315         if (lock == NULL)
316                 goto fail;
317
318         list_lock = dhd_os_spin_lock_init(dhdp->osh);
319         if (list_lock == NULL)
320                 goto lock_fail;
321
322         dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
323         bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
324 #ifdef DHD_LOSSLESS_ROAMING
325         dhdp->dequeue_prec_map = ALLPRIO;
326 #endif
327         /* Now populate into dhd pub */
328         DHD_FLOWID_LOCK(lock, flags);
329         dhdp->num_flow_rings = num_flow_rings;
330         dhdp->flowid_allocator = (void *)flowid_allocator;
331         dhdp->flow_ring_table = (void *)flow_ring_table;
332         dhdp->if_flow_lkup = (void *)if_flow_lkup;
333         dhdp->flowid_lock = lock;
334         dhdp->flow_rings_inited = TRUE;
335         dhdp->flowring_list_lock = list_lock;
336         DHD_FLOWID_UNLOCK(lock, flags);
337
338         DHD_INFO(("%s done\n", __FUNCTION__));
339         return BCME_OK;
340
341 lock_fail:
342         /* deinit the spinlock */
343         dhd_os_spin_lock_deinit(dhdp->osh, lock);
344
345 fail:
346         /* Destruct the per interface flow lkup table */
347         if (if_flow_lkup != NULL) {
348                 DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
349         }
350         if (flow_ring_table != NULL) {
351                 for (idx = 0; idx < num_flow_rings; idx++) {
352                         if (flow_ring_table[idx].lock != NULL)
353                                 dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
354                 }
355                 MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
356         }
357         id16_map_fini(dhdp->osh, flowid_allocator);
358
359         return BCME_NOMEM;
360 }
361
362 /** Deinit Flow Ring specific data structures */
363 void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
364 {
365         uint16 idx;
366         uint32 flow_ring_table_sz;
367         uint32 if_flow_lkup_sz;
368         flow_ring_table_t *flow_ring_table;
369         unsigned long flags;
370         void *lock;
371
372         DHD_INFO(("dhd_flow_rings_deinit\n"));
373
374         if (!(dhdp->flow_rings_inited)) {
375                 DHD_ERROR(("dhd_flow_rings not initialized!\n"));
376                 return;
377         }
378
379         if (dhdp->flow_ring_table != NULL) {
380
381                 ASSERT(dhdp->num_flow_rings > 0);
382
383                 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
384                 flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
385                 dhdp->flow_ring_table = NULL;
386                 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
387                 for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
388                         if (flow_ring_table[idx].active) {
389                                 dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
390                         }
391                         ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));
392
393                         /* Deinit flow ring queue locks before destroying flow ring table */
394                         dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
395                         flow_ring_table[idx].lock = NULL;
396
397                 }
398
399                 /* Destruct the flow ring table */
400                 flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
401                 MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
402         }
403
404         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
405
406         /* Destruct the per interface flow lkup table */
407         if (dhdp->if_flow_lkup != NULL) {
408                 if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
409                 bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
410                 DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
411                 dhdp->if_flow_lkup = NULL;
412         }
413
414         /* Destruct the flowid allocator */
415         if (dhdp->flowid_allocator != NULL)
416                 dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
417
418         dhdp->num_flow_rings = 0U;
419         bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
420
421         lock = dhdp->flowid_lock;
422         dhdp->flowid_lock = NULL;
423
424         DHD_FLOWID_UNLOCK(lock, flags);
425         dhd_os_spin_lock_deinit(dhdp->osh, lock);
426
427         dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
428         dhdp->flowring_list_lock = NULL;
429
430         ASSERT(dhdp->if_flow_lkup == NULL);
431         ASSERT(dhdp->flowid_allocator == NULL);
432         ASSERT(dhdp->flow_ring_table == NULL);
433         dhdp->flow_rings_inited = FALSE;
434 }
435
436 /** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
437 uint8
438 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
439 {
440         if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
441         ASSERT(if_flow_lkup);
442         return if_flow_lkup[ifindex].role;
443 }
444
445 #ifdef WLTDLS
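/** Returns TRUE if 'da' is the address of a known TDLS peer (linear scan of the peer table) */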
446 bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
447 {
448         tdls_peer_node_t *cur = dhdp->peer_tbl.node;
449         while (cur != NULL) {
450                 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
451                         return TRUE;
452                 }
453                 cur = cur->next;
454         }
455         return FALSE;
456 }
457 #endif /* WLTDLS */
458
459 /** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
460 static INLINE uint16
461 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
462 {
463         int hash;
464         bool ismcast = FALSE;
465         flow_hash_info_t *cur;
466         if_flow_lkup_t *if_flow_lkup;
467         unsigned long flags;
468
469         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
470         if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
471
472         ASSERT(if_flow_lkup);
473
474         if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
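                /*
                 * A STA interface talks to a single peer (the AP), so its flows are
                 * keyed by priority alone and fl_hash[] is indexed directly by prio.
                 * TDLS destinations are the exception: they get regular {da, prio}
                 * hashed entries, handled below.
                 */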
475 #ifdef WLTDLS
476                 if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
477                         is_tdls_destination(dhdp, da)) {
478                         hash = DHD_FLOWRING_HASHINDEX(da, prio);
479                         cur = if_flow_lkup[ifindex].fl_hash[hash];
480                         while (cur != NULL) {
481                                 if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
482                                         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
483                                         return cur->flowid;
484                                 }
485                                 cur = cur->next;
486                         }
487                         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
488                         return FLOWID_INVALID;
489                 }
490 #endif /* WLTDLS */
491                 cur = if_flow_lkup[ifindex].fl_hash[prio];
492                 if (cur) {
493                         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
494                         return cur->flowid;
495                 }
496         } else {
497
498                 if (ETHER_ISMULTI(da)) {
499                         ismcast = TRUE;
500                         hash = 0;
501                 } else {
502                         hash = DHD_FLOWRING_HASHINDEX(da, prio);
503                 }
504
505                 cur = if_flow_lkup[ifindex].fl_hash[hash];
506
507                 while (cur) {
508                         if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
509                                 (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
510                                 (cur->flow_info.tid == prio))) {
511                                 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
512                                 return cur->flowid;
513                         }
514                         cur = cur->next;
515                 }
516         }
517         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
518
519         DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
520         return FLOWID_INVALID;
521 } /* dhd_flowid_find */
522
523 /** Create unique Flow ID, called when a flow ring is created. */
524 static INLINE uint16
525 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
526 {
527         flow_hash_info_t *fl_hash_node, *cur;
528         if_flow_lkup_t *if_flow_lkup;
529         int hash;
530         uint16 flowid;
531         unsigned long flags;
532
533         fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
            if (fl_hash_node == NULL) {
                    DHD_ERROR(("%s: flow_hash_info_t alloc failure\n", __FUNCTION__));
                    return FLOWID_INVALID;
            }
534         memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
535
536         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
537         ASSERT(dhdp->flowid_allocator != NULL);
538         flowid = id16_map_alloc(dhdp->flowid_allocator);
539         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
540
541         if (flowid == FLOWID_INVALID) {
542                 MFREE(dhdp->osh, fl_hash_node,  sizeof(flow_hash_info_t));
543                 DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
544                 return FLOWID_INVALID;
545         }
546
547         fl_hash_node->flowid = flowid;
548         fl_hash_node->flow_info.tid = prio;
549         fl_hash_node->flow_info.ifindex = ifindex;
550         fl_hash_node->next = NULL;
551
552         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
553         if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
554
555         if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
556                 /* For a STA interface with a non-TDLS destination, the entry is keyed on prio only */
557 #ifdef WLTDLS
558                 if (dhdp->peer_tbl.tdls_peer_count &&
559                         (is_tdls_destination(dhdp, da))) {
560                         hash = DHD_FLOWRING_HASHINDEX(da, prio);
561                         cur = if_flow_lkup[ifindex].fl_hash[hash];
562                         if (cur) {
563                                 while (cur->next) {
564                                         cur = cur->next;
565                                 }
566                                 cur->next = fl_hash_node;
567                         } else {
568                                 if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
569                         }
570                 } else
571 #endif /* WLTDLS */
572                         if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
573         } else {
574
575                 /* For bcast/mcast, assign the first slot in the interface's hash table */
576                 hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
577                 cur = if_flow_lkup[ifindex].fl_hash[hash];
578                 if (cur) {
579                         while (cur->next) {
580                                 cur = cur->next;
581                         }
582                         cur->next = fl_hash_node;
583                 } else
584                         if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
585         }
586         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
587
588         DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
589
590         return fl_hash_node->flowid;
591 } /* dhd_flowid_alloc */
592
593 /** Get flow ring ID, if not present try to create one */
594 static INLINE int
595 dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
596                   uint8 prio, char *sa, char *da, uint16 *flowid)
597 {
598         uint16 id;
599         flow_ring_node_t *flow_ring_node;
600         flow_ring_table_t *flow_ring_table;
601         unsigned long flags;
602         int ret;
603
604         DHD_INFO(("%s\n", __FUNCTION__));
605
606         if (!dhdp->flow_ring_table) {
607                 return BCME_ERROR;
608         }
609
610         flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
611
612         id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
613
614         if (id == FLOWID_INVALID) {
615
616                 if_flow_lkup_t *if_flow_lkup;
617                 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
618
619                 if (!if_flow_lkup[ifindex].status)
620                         return BCME_ERROR;
621
622
623                 id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
624                 if (id == FLOWID_INVALID) {
625                         DHD_ERROR(("%s: failed to allocate flowid: ifindex %u status %u\n",
626                                    __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
627                         return BCME_ERROR;
628                 }
629
630                 /* register this flowid in dhd_pub */
631                 dhd_add_flowid(dhdp, ifindex, prio, da, id);
632
633                 ASSERT(id < dhdp->num_flow_rings);
634
635                 flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
636
637                 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
638
639                 /* Init Flow info */
640                 memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
641                 memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
642                 flow_ring_node->flow_info.tid = prio;
643                 flow_ring_node->flow_info.ifindex = ifindex;
644                 flow_ring_node->active = TRUE;
645                 flow_ring_node->status = FLOW_RING_STATUS_PENDING;
646                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
647
648                 /* Create and inform device about the new flow */
649                 if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
650                                 != BCME_OK) {
651                         DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
652                         return BCME_ERROR;
653                 }
654
655                 *flowid = id;
656                 return BCME_OK;
657         } else {
658                 /* if the Flow id was found in the hash */
659                 ASSERT(id < dhdp->num_flow_rings);
660
661                 flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
662                 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
663
664                 /*
665                  * If the flow_ring_node is in FLOW_RING_STATUS_OPEN or
666                  * FLOW_RING_STATUS_PENDING state, the flow id can be returned to the
667                  * caller. FLOW_RING_STATUS_PENDING means that creation is still in
668                  * progress, so the packets should simply be queued.
669                  *
670                  * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
671                  * FLOW_RING_STATUS_CLOSED state, an error should be returned.
672                  * Note that when a flow ring is being deleted it is first marked
673                  * FLOW_RING_STATUS_DELETE_PENDING; tx packets may still arrive before
674                  * the dongle responds and the ring is marked FLOW_RING_STATUS_CLOSED.
675                  * Such packets should be dropped.
676                  * The decision to return OK must NOT be based on the 'active' flag,
677                  * because 'active' is set TRUE when a flow_ring_node is allocated and
678                  * set FALSE only when the flow ring is removed, so it does not reflect
679                  * the true state of the flow ring.
680                  */
681                 if (flow_ring_node->status == FLOW_RING_STATUS_OPEN ||
682                         flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
683                         *flowid = id;
684                         ret = BCME_OK;
685                 } else {
686                         *flowid = FLOWID_INVALID;
687                         ret = BCME_ERROR;
688                 }
689
690                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
691                 return ret;
692
693         } /* Flow Id found in the hash */
694 } /* dhd_flowid_lookup */
695
696 /**
697  * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
698  * select the flowring to send the packet to the dongle.
699  */
700 int BCMFASTPATH
701 dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
702 {
703         uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
704         struct ether_header *eh = (struct ether_header *)pktdata;
705         uint16 flowid;
706
707         ASSERT(ifindex < DHD_MAX_IFS);
708
709         if (ifindex >= DHD_MAX_IFS) {
710                 return BCME_BADARG;
711         }
712
713         if (!dhdp->flowid_allocator) {
714                 DHD_ERROR(("%s: Flow rings not initialized yet\n", __FUNCTION__));
715                 return BCME_ERROR;
716         }
717
718         if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
719                 &flowid) != BCME_OK) {
720                 return BCME_ERROR;
721         }
722
723         DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
724
725         /* Tag the packet with flowid */
726         DHD_PKT_SET_FLOWID(pktbuf, flowid);
727         return BCME_OK;
728 }
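
/*
 * Hypothetical call-site sketch: the tx path is expected to call
 * dhd_flowid_update() before handing the packet to the bus layer, which reads
 * the tag back (presumably via the matching DHD_PKT_GET_FLOWID()) to select
 * the flow ring:
 *
 *   if (dhd_flowid_update(dhdp, ifidx, prio, pktbuf) != BCME_OK) {
 *           PKTFREE(dhdp->osh, pktbuf, TRUE);
 *           return BCME_ERROR;
 *   }
 *   return dhd_bus_txdata(dhdp->bus, pktbuf, ifidx);
 */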
729
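/**
 * Releases a flowid: unlinks its hash entry from the interface's lookup table,
 * deregisters it from dhd_pub and returns the id to the flowid allocator.
 */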
730 void
731 dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
732 {
733         int hashix;
734         bool found = FALSE;
735         flow_hash_info_t *cur, *prev;
736         if_flow_lkup_t *if_flow_lkup;
737         unsigned long flags;
738
739         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
740         if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
741
742         for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
743
744                 cur = if_flow_lkup[ifindex].fl_hash[hashix];
745
746                 if (cur) {
747                         if (cur->flowid == flowid) {
748                                 found = TRUE;
749                         }
750
751                         prev = NULL;
752                         while (!found && cur) {
753                                 if (cur->flowid == flowid) {
754                                         found = TRUE;
755                                         break;
756                                 }
757                                 prev = cur;
758                                 cur = cur->next;
759                         }
760                         if (found) {
761                                 if (!prev) {
762                                         if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
763                                 } else {
764                                         prev->next = cur->next;
765                                 }
766
767                                 /* deregister flowid from dhd_pub. */
768                                 dhd_del_flowid(dhdp, ifindex, flowid);
769
770                                 id16_map_free(dhdp->flowid_allocator, flowid);
771                                 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
772                                 MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
773
774                                 return;
775                         }
776                 }
777         }
778
779         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
780         DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
781                    __FUNCTION__, flowid));
782 } /* dhd_flowid_free */
783
784 /**
785  * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
786  * indicates that a wireless link has gone down.
787  */
788 void
789 dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
790 {
791         uint32 id;
792         flow_ring_table_t *flow_ring_table;
793
794         DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
795
796         ASSERT(ifindex < DHD_MAX_IFS);
797         if (ifindex >= DHD_MAX_IFS)
798                 return;
799
800         if (!dhdp->flow_ring_table)
801                 return;
802
803         flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
804         for (id = 0; id < dhdp->num_flow_rings; id++) {
805                 if (flow_ring_table[id].active &&
806                     (flow_ring_table[id].flow_info.ifindex == ifindex)) {
807                         dhd_bus_flow_ring_delete_request(dhdp->bus,
808                                                          (void *) &flow_ring_table[id]);
809                 }
810         }
811 }
812
813 /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
814 void
815 dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
816 {
817         uint32 id;
818         flow_ring_table_t *flow_ring_table;
819
820         DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
821
822         ASSERT(ifindex < DHD_MAX_IFS);
823         if (ifindex >= DHD_MAX_IFS)
824                 return;
825
826         if (!dhdp->flow_ring_table)
827                 return;
828
829         flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
830         for (id = 0; id < dhdp->num_flow_rings; id++) {
831                 if (flow_ring_table[id].active &&
832                         (flow_ring_table[id].flow_info.ifindex == ifindex) &&
833                         (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
834                         (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
835                         DHD_INFO(("%s: deleting flowid %d\n",
836                                 __FUNCTION__, flow_ring_table[id].flowid));
837                         dhd_bus_flow_ring_delete_request(dhdp->bus,
838                                 (void *) &flow_ring_table[id]);
839                 }
840         }
841 }
842
843 /** Handles interface ADD, CHANGE, DEL indications from the dongle */
844 void
845 dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
846                                uint8 op, uint8 role)
847 {
848         if_flow_lkup_t *if_flow_lkup;
849         unsigned long flags;
850
851         ASSERT(ifindex < DHD_MAX_IFS);
852         if (ifindex >= DHD_MAX_IFS)
853                 return;
854
855         DHD_INFO(("%s: ifindex %u op %u role is %u \n",
856                   __FUNCTION__, ifindex, op, role));
857         if (!dhdp->flowid_allocator) {
858                 DHD_ERROR(("%s: Flow rings not initialized yet\n", __FUNCTION__));
859                 return;
860         }
861
862         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
863         if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
864
865         if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
866
867                 if_flow_lkup[ifindex].role = role;
868
869                 if (role != WLC_E_IF_ROLE_STA) {
870                         if_flow_lkup[ifindex].status = TRUE;
871                         DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
872                                   __FUNCTION__, ifindex, role));
873                         /* Create Mcast Flow */
874                 }
875         } else  if (op == WLC_E_IF_DEL) {
876                 if_flow_lkup[ifindex].status = FALSE;
877                 DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
878                           __FUNCTION__, ifindex, role));
879         }
880         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
881 }
882
883 /** Handles a STA 'link' indication from the dongle */
884 int
885 dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
886 {
887         if_flow_lkup_t *if_flow_lkup;
888         unsigned long flags;
889
890         ASSERT(ifindex < DHD_MAX_IFS);
891         if (ifindex >= DHD_MAX_IFS)
892                 return BCME_BADARG;
893
894         DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
895
896         DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
897         if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
898
899         if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
900                 if (status)
901                         if_flow_lkup[ifindex].status = TRUE;
902                 else
903                         if_flow_lkup[ifindex].status = FALSE;
904         }
905         DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
906
907         return BCME_OK;
908 }
909
910 /** Update flow priority mapping, called on IOVAR */
911 int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
912 {
913         uint16 flowid;
914         flow_ring_node_t *flow_ring_node;
915
916         if (map > DHD_FLOW_PRIO_LLR_MAP)
917                 return BCME_BADOPTION;
918
919         /* Check if we need to change prio map */
920         if (map == dhdp->flow_prio_map_type)
921                 return BCME_OK;
922
923         /* If any ring is active we cannot change priority mapping for flow rings */
924         for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
925                 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
926                 if (flow_ring_node->active)
927                         return BCME_EPERM;
928         }
929
930         /* Inform firmware about new mapping type */
931         if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
932                 return BCME_ERROR;
933
934         /* update internal structures */
935         dhdp->flow_prio_map_type = map;
936         if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
937                 bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
938         else
939                 bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
940
941         return BCME_OK;
942 }
943
944 /** Inform firmware on updated flow priority mapping, called on IOVAR */
945 int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
946 {
947         uint8 iovbuf[24];
948         if (!set) {
949                 bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
950                 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
951                         DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
952                         return BCME_ERROR;
953                 }
954                 *map = iovbuf[0];
955                 return BCME_OK;
956         }
957         bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
958         if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
959                 DHD_ERROR(("%s: failed to set fl_prio_map \n",
960                         __FUNCTION__));
961                 return BCME_ERROR;
962         }
963         return BCME_OK;
964 }