/*
 * HND generic packet pool operation primitives
 *
 * $Copyright Open Broadcom Corporation$
 */
12 #include <hnd_pktpool.h>
/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID (0U)
/* Sentinel pointer marking the reserved slot #0; never a valid pool address */
#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
/* A free registry slot holds POOLPTR(NULL) — matches static zero-init */
#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
/* Wrappers around the RAM-resident accessor functions defined below */
#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id) \
PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id) \
(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV() \
PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD() \
(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
/* Walk all un-reserved entries in registry (slots 1..pktpools_max) */
#define PKTPOOL_REGISTRY_FOREACH(id) \
for ((id) = 1U; (id) <= pktpools_max; (id)++)
uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);
/** accessor functions required when ROMming this file, forced into RAM */
/* Store pool pointer 'pp' into registry slot 'id' */
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
pktpools_registry[id] = pp;
/* Compare registry slot 'id' against 'pp'; non-zero when they match */
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
return pktpools_registry[id] == pp;
int /* Construct a pool registry to serve a maximum of total_pools */
pktpool_attach(osl_t *osh, uint32 total_pools)
/* refuse double construction: non-zero pktpools_max means already attached */
if (pktpools_max != 0U) {
ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
/* Initialize registry: reserve slot#0 and tag others as free */
PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
/* NOTE(review): FOREACH iterates up to pktpools_max, which appears to still
 * be 0 at this point; clearing relies on the static array already being
 * zero-initialized (FREE_PTR == POOLPTR(NULL)) — confirm intended ordering.
 */
PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
PKTPOOL_REGISTRY_CLR(poolid);
pktpools_max = total_pools;
/* report the constructed capacity back to the caller */
return (int)pktpools_max;
int /* Destruct the pool registry. Ascertain all pools were first de-inited */
/* NOTE: the misspelled name "dettach" is the established public API name;
 * do not rename without fixing all callers.
 */
pktpool_dettach(osl_t *osh)
/* nothing to do if the registry was never constructed (or already destructed) */
if (pktpools_max == 0U) {
/* Ascertain that no pools are still registered */
ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
pktpools_max = 0U; /* restore boot state */
static int /* Register a pool in a free slot; return the registry slot index */
pktpool_register(pktpool_t * poolptr)
if (pktpools_max == 0U) {
return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
/* redundant with the early return above, kept as a belt-and-braces check */
ASSERT(pktpools_max != 0U);
/* find an empty slot in pktpools_registry */
PKTPOOL_REGISTRY_FOREACH(poolid) {
if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
return (int)poolid; /* return pool ID */
return PKTPOOL_INVALID_ID; /* error: registry is full */
static int /* Deregister a pktpool, given the pool pointer; tag slot as free */
pktpool_deregister(pktpool_t * poolptr)
ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
poolid = POOLID(poolptr);
ASSERT(poolid <= pktpools_max);
/* Ascertain that a previously registered poolptr is being de-registered */
if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
return BCME_ERROR; /* mismatch in registry */
* User provides a pktpool_t structure and specifies the number of packets to
149 * be pre-filled into the pool (pplen). The size of all packets in a pool must
150 * be the same and is specified by plen.
151 * pktpool_init first attempts to register the pool and fetch a unique poolid.
* If registration fails, it is considered a BCME_ERROR, caused either by the
* registry not having been pre-created (pktpool_attach) or by the registry being full.
154 * If registration succeeds, then the requested number of packets will be filled
155 * into the pool as part of initialization. In the event that there is no
156 * available memory to service the request, then BCME_NOMEM will be returned
157 * along with the count of how many packets were successfully allocated.
* In dongle builds, prior to memory reclamation, one should limit the number
159 * of packets to be allocated during pktpool_init and fill the pool up after
pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
int i, err = BCME_OK;
ASSERT(pktp != NULL);
ASSERT(pplen != NULL);
/* start from a clean pool control block */
bzero(pktp, sizeof(pktpool_t));
/* assign a unique pktpool id */
if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
POOLSETID(pktp, pktp_id);
pktp->istx = istx ? TRUE : FALSE;
pktp->plen = (uint16)plen; /* all packets in this pool share this length */
pktp->maxlen = PKTPOOL_LEN_MAX;
pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen); /* clamp requested prefill */
for (i = 0; i < pktplen; i++) {
p = PKTGET(osh, plen, TRUE);
/* Not able to allocate all requested pkts
* so just return what was actually allocated
* We can add to the pool later
*/
if (pktp->freelist == NULL) /* pktpool free list is empty */
PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
pktp->dbg_q[pktp->dbg_qlen++].p = p; /* track pkt in debug queue (BCMDBG_POOL) */
pktp->len = pktp->avail; /* pool length == packets actually allocated */
227 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
228 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
229 * An assert is in place to ensure that there are no packets still lingering
230 * around. Packets freed to a pool after the deinit will cause a memory
231 * corruption as the pktpool_t structure no longer exists.
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
ASSERT(pktp != NULL);
/* BCMDBG_POOL: drop all debug-queue references before draining the pool */
for (i = 0; i <= pktp->len; i++) {
pktp->dbg_q[i].p = NULL;
/* drain the free list, returning every pooled packet to the heap */
while (pktp->freelist != NULL) {
void * p = pktp->freelist;
pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
PKTSETFREELIST(p, NULL);
PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
PKTFREE(osh, p, pktp->istx); /* free the packet */
ASSERT(freed <= pktp->len);
pktp->avail -= freed;
/* all packets must have been returned to the pool before deinit */
ASSERT(pktp->avail == 0);
pktpool_deregister(pktp); /* release previously acquired unique pool id */
POOLSETID(pktp, PKTPOOL_INVALID_ID);
pktp->inited = FALSE;
/* Are there still pending pkts? */
ASSERT(pktp->len == 0);
281 pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
285 int len, psize, maxlen;
287 ASSERT(pktp->plen != 0);
289 maxlen = pktp->maxlen;
290 psize = minimal ? (maxlen >> 2) : maxlen;
291 for (len = (int)pktp->len; len < psize; len++) {
293 p = PKTGET(osh, pktp->len, TRUE);
300 if (pktpool_add(pktp, p) != BCME_OK) {
301 PKTFREE(osh, p, FALSE);
/* Dequeue one packet from the head of the pool's free list */
pktpool_deq(pktpool_t *pktp)
/* no packets available */
if (pktp->avail == 0)
ASSERT(pktp->freelist != NULL); /* avail > 0 implies a non-empty free list */
p = pktp->freelist; /* dequeue packet from head of pktpool free list */
pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
PKTSETFREELIST(p, NULL); /* detach pkt from the list linkage */
/* Enqueue packet 'p' at the head of the pool's free list */
pktpool_enq(pktpool_t *pktp, void *p)
PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
pktp->freelist = p; /* free list points to newly inserted packet */
ASSERT(pktp->avail <= pktp->len); /* available count never exceeds pool length */
/* utility for registering host addr fill function called from pciedev */
/* Register the callback used to fill host addresses into pool packets;
 * only one such callback may be registered per pool.
 */
(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
ASSERT(pktp->cbext.cb == NULL);
pktp->cbext.arg = arg;
/* Register the callback used to attach rx completion IDs to pool packets;
 * only one such callback may be registered per pool.
 */
pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
ASSERT(pktp->rxcplidfn.cb == NULL);
pktp->rxcplidfn.cb = cb;
pktp->rxcplidfn.arg = arg;
/* Callback functions for split rx modes */
/* whenever the host posts an rxbuffer, invoke dma_rxfill from the pciedev layer */
pktpool_invoke_dmarxfill(pktpool_t *pktp)
ASSERT(pktp->dmarxfill.cb);
ASSERT(pktp->dmarxfill.arg);
/* guard the call despite the ASSERTs: in non-assert builds cb may be unset */
if (pktp->dmarxfill.cb)
pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
/* Register the dma_rxfill callback invoked by pktpool_invoke_dmarxfill() */
pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
pktp->dmarxfill.cb = cb;
pktp->dmarxfill.arg = arg;
/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */
/* Register an avail callback 'cb(arg)' fired when packets become available */
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
if (i == PKTPOOL_CB_MAX) /* callback table is full */
ASSERT(pktp->cbs[i].cb == NULL);
pktp->cbs[i].cb = cb;
pktp->cbs[i].arg = arg;
/* Register an empty callback 'cb(arg)' fired when the pool runs empty */
pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
if (i == PKTPOOL_CB_MAX) /* callback table is full */
ASSERT(pktp->ecbs[i].cb == NULL);
pktp->ecbs[i].cb = cb;
pktp->ecbs[i].arg = arg;
/* Invoke every registered empty callback; called when the pool is exhausted */
pktpool_empty_notify(pktpool_t *pktp)
for (i = 0; i < pktp->ecbcnt; i++) {
ASSERT(pktp->ecbs[i].cb != NULL);
pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
/* Register a debug callback (BCMDBG_POOL builds) */
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
if (i == PKTPOOL_CB_MAX) /* callback table is full */
ASSERT(pktp->dbg_cbs[i].cb == NULL);
pktp->dbg_cbs[i].cb = cb;
pktp->dbg_cbs[i].arg = arg;
int pktpool_dbg_notify(pktpool_t *pktp);
/* Invoke every registered debug callback */
pktpool_dbg_notify(pktpool_t *pktp)
for (i = 0; i < pktp->dbg_cbcnt; i++) {
ASSERT(pktp->dbg_cbs[i].cb);
pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
/* Print every packet tracked in the debug queue, with duration and state */
pktpool_dbg_dump(pktpool_t *pktp)
printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p);
printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
/* Tally per-state packet counts across the debug queue into 'stats' */
pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
bzero(stats, sizeof(pktpool_stats_t));
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p != NULL);
state = PKTPOOLSTATE(pktp->dbg_q[i].p);
/* bucket the packet by its state */
stats->txdh++; break;
stats->txd11++; break;
stats->rxdh++; break;
stats->rxd11++; break;
stats->rxfill++; break;
stats->idle++; break;
/* Record a cycle-count timestamp for packet 'p' (debug trigger start) */
pktpool_start_trigger(pktpool_t *pktp, void *p)
/* ignore packets not owned by a pool */
if (!PKTPOOL(OSH_NULL, p))
OSL_GETCYCLES(cycles);
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p != NULL);
if (pktp->dbg_q[i].p == p) {
pktp->dbg_q[i].cycles = cycles;
int pktpool_stop_trigger(pktpool_t *pktp, void *p);
/* Compute the duration since start_trigger for 'p', handling counter wrap */
pktpool_stop_trigger(pktpool_t *pktp, void *p)
if (!PKTPOOL(OSH_NULL, p))
OSL_GETCYCLES(cycles);
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p != NULL);
if (pktp->dbg_q[i].p == p) {
/* skip packets that never had a start trigger recorded */
if (pktp->dbg_q[i].cycles == 0)
if (cycles >= pktp->dbg_q[i].cycles)
pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
/* counter wrapped: account for the modulo distance */
(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
pktp->dbg_q[i].cycles = 0; /* re-arm for the next trigger */
/* Revert avail notification to normal mode: all registered callbacks fire */
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
pktp->availcb_excl = NULL;
/* Restrict avail notification to a single, already-registered callback 'cb' */
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
ASSERT(pktp->availcb_excl == NULL); /* exclusive mode must not already be set */
for (i = 0; i < pktp->cbcnt; i++) {
if (cb == pktp->cbs[i].cb) {
pktp->availcb_excl = &pktp->cbs[i];
/* 'cb' was never registered via pktpool_avail_register() */
if (pktp->availcb_excl == NULL)
/* Notify interested parties that packets are available in the pool */
pktpool_avail_notify(pktpool_t *pktp)
/* exclusive mode: only the designated callback fires */
if (pktp->availcb_excl != NULL) {
pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
for (i = 0; i < pktp->cbcnt; i++) {
ASSERT(pktp->cbs[idx].cb != NULL);
pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
/* Alternate between filling from head or tail
*/
/* Get one packet from the pool; on empty, notify owners and retry once */
pktpool_get(pktpool_t *pktp)
p = pktpool_deq(pktp);
/* Notify and try to reclaim tx pkts */
pktpool_empty_notify(pktp);
p = pktpool_deq(pktp); /* retry after empty-notification reclaimed pkts */
/* Return packet 'p' to its pool and notify waiters of availability */
pktpool_free(pktpool_t *pktp, void *p)
/* pktpool_stop_trigger(pktp, p); */
pktpool_enq(pktp, p);
/* skip empty-notifications while the producer has them disabled */
if (pktp->emptycb_disable)
if (pktp->empty == FALSE)
pktpool_avail_notify(pktp);
/* Add a freshly allocated packet 'p' to pool 'pktp' */
pktpool_add(pktpool_t *pktp, void *p)
if (pktp->len == pktp->maxlen) /* pool is already at capacity */
/* pkts in pool have same length */
ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
PKTSETPOOL(OSH_NULL, p, TRUE, pktp); /* tag pkt with this pool's id */
pktpool_enq(pktp, p);
pktp->dbg_q[pktp->dbg_qlen++].p = p; /* track in debug queue (BCMDBG_POOL) */
/* Force pktpool_setmaxlen () into RAM as it uses a constant
* (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
*/
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
if (maxlen > PKTPOOL_LEN_MAX)
maxlen = PKTPOOL_LEN_MAX; /* clamp to the compile-time ceiling */
/* if pool is already beyond maxlen, then just cap it
* since we currently do not reduce the pool len
*/
pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
/* Enable or disable invocation of the pool's empty callbacks */
pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
pktp->emptycb_disable = disable;
/* Query whether empty callbacks are currently disabled for this pool */
pktpool_emptycb_disabled(pktpool_t *pktp)
return pktp->emptycb_disable;