/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *cred_info,
				       u16 svc_pri_order[], int len);

/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD	1500
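/*
 * HTC flow control works on "credits": the target advertises a pool of
 * fixed-size transmit credits (target->tgt_cred_sz bytes each) and the
 * host charges every outgoing message against the endpoint that sends
 * it. The helpers below move credits between the common free pool
 * (cred_info->cur_free_credits) and the per-endpoint distribution
 * state (struct htc_endpoint_credit_dist).
 */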
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
				  struct htc_endpoint_credit_dist *ep_dist,
				  int credits)
{
	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
		   ep_dist->endpoint, credits);

	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}
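/*
 * Worked example of the "normal" credit value computed in
 * ath6kl_credit_init() below: with 26 free credits and
 * cred_per_msg == 4, the pool covers (26 / 4) * 4 = 24 credits of
 * whole messages; 3/4 of that is 18, so cred_norm becomes 18 (and
 * never less than cred_per_msg itself).
 */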
static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
			/* this is the lowest priority data endpoint */
			/* FIXME: this looks fishy, check */
			cred_info->lowestpri_ep_dist = cur_ep_dist->list;

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		} else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg is the same. We use a simple
			 * calculation here: take the remaining credits,
			 * determine how many whole messages they can cover
			 * and then set each endpoint's normal value equal
			 * to 3/4 of this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}
/* initialize and setup credit distribution */
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
					struct ath6kl_htc_credit_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);

	return 0;
}
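/*
 * The order of servicepriority[] above is the order in which
 * ath6kl_htc_set_credit_dist() queues endpoints on
 * target->cred_dist_list; the reduce/seek helpers below rely on that
 * list ordering when deciding which endpoints may be taxed.
 */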
/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
		   ep_dist->endpoint, limit);

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_list;

	list_for_each_entry(cur_list, epdist_list, list) {
		if (cur_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_list->cred_to_dist > 0) {
			cur_list->credits += cur_list->cred_to_dist;
			cur_list->cred_to_dist = 0;

			if (cur_list->credits > cur_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_list,
						     cur_list->cred_assngd);

			if (cur_list->credits > cur_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_list,
						     cur_list->cred_norm);

			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_list, 0);
			}
		}
	}
}
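/*
 * Credit "seeking" example for the function below: an endpoint seeking
 * 5 credits with only 2 free leaves need = 3. A lower-priority
 * endpoint with cred_assngd 10 and cred_min 6 satisfies
 * (10 - 3) >= 6, so it is reduced to 7 and gives up the 3 credits;
 * no endpoint is ever taxed below its cred_min.
 */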
/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 * 1. Only take from lower priority endpoints
	 * 2. Only take what is allocated above the minimum (never
	 *    starve an endpoint completely)
	 * 3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and it has enough credits assigned
			 * above its minimum to fulfill our need; try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}
/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}
/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}
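/*
 * Every HTC frame is prefixed by a 6-byte header (struct
 * htc_frame_hdr): the endpoint ID, a flags byte, a little-endian
 * 16-bit payload length and two per-message control bytes (used for
 * bundle padding and the TX sequence number). The packet's buf
 * pointer is rewound by HTC_HDR_LENGTH so the header is written in
 * front of the payload.
 */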
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}
static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}
static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
		   packet->info.tx.seqno);

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);

	/* do completion */
	htc_tx_complete(endpoint, &container);
}
static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}
static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
		   send_len, packet->info.tx.seqno, padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
					     target->dev->ar->mbox_info.htc_addr,
					     packet->buf, padded_len,
					     HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
					 target->dev->ar->mbox_info.htc_addr,
					 packet->buf, padded_len,
					 HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}
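/*
 * Example of the credit charge computed below: with tgt_cred_sz == 128
 * and a padded send length of 1590 bytes, DIV_ROUND_UP(1590, 128)
 * charges 13 credits; anything that fits within one credit block
 * costs exactly 1.
 */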
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {
		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx got packet 0x%p queue depth %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}
/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}
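/*
 * Example of the padding rule above: with a 128-byte credit size, a
 * 100-byte transfer gets 28 dummy bytes so it consumes exactly one
 * full credit; if the padding required ever exceeded 255 bytes the
 * message would be sent non-bundled instead.
 */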
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}
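/*
 * TX bundling gathers up to msg_per_bndl_max queued packets into a
 * single HIF scatter request so the SDIO layer can push them in one
 * transfer. Each packet becomes one scatter entry and the whole
 * bundle must fit within target->max_tx_bndl_sz bytes.
 */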
/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);
}
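/*
 * ath6kl_htc_tx_from_queue() is serialized per endpoint with the
 * tx_proc_cnt counter: a second caller that sees the count already
 * non-zero simply returns ("htc tx busy") and leaves draining to the
 * first caller, avoiding recursion when completions queue more TX.
 */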
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			ath6kl_htc_tx_issue(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occurred continuously for a certain number of TX,
		 * enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
				    TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}
static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}
static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}
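/*
 * Setup-complete handshake: targets speaking HTC 2.1 or later get the
 * extended message (which can also advertise host-side RX bundling
 * via HTC_SETUP_COMP_FLG_RX_BNDL_EN); older targets get the plain
 * HTC_MSG_SETUP_COMPLETE_ID message.
 */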
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
				target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;

		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *credit_info,
				       u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}
static int ath6kl_htc_mbox_tx(struct htc_target *target,
			      struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx ep id %d buf 0x%p len %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}
/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
				       enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}
static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id eid,
					     bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}
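/* HTC Rx */

/*
 * The receive path below is driven by "lookaheads": the target exposes
 * a copy of the first 4 bytes of the next pending message (i.e. the
 * start of its HTC frame header) to the host ahead of the mailbox
 * read, letting the host size and validate the fetch before pulling
 * the message proper.
 */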
static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}
static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}
/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}
/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
			    htc_hdr->eid, htc_hdr->flags,
			    le16_to_cpu(htc_hdr->payld_len));
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {
		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq)) {
				packet = NULL;
			} else {
				packet = list_first_entry(&ep->rx_bufq,
							  struct htc_packet,
							  list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}
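/*
 * In ath6kl_htc_rx_alloc() below, a lookahead whose flags carry
 * HTC_FLG_RX_BNDL_CNT announces that the following n messages share
 * the same padded length, so all n + 1 frames (count plus the starter
 * frame) can be fetched as a single scatter bundle.
 */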
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {
		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation; note, this is done with the lock held.
		 */
		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}
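/*
 * Received frames may carry a trailer of TLV-style records (credit
 * reports, lookahead reports, bundled lookahead reports). The HTC
 * header's ctrl[0] byte holds the trailer length; each record is a
 * byte-aligned struct htc_record_hdr followed by its payload.
 */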
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
		    next_lk_ahds) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			if (n_lk_ahds)
				*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			if (n_lk_ahds)
				*n_lk_ahds = len;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);

	endpoint->ep_cb.rx(endpoint->target, packet);
}
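/*
 * RX bundling mirrors the TX side: up to msg_per_bndl_max pending
 * messages are mapped onto one HIF scatter request (one packet per
 * scatter entry, total capped at target->max_rx_bndl_sz). Only the
 * last packet of a bundle may contribute a lookahead; earlier ones
 * are flagged HTC_RX_PKT_IGNORE_LOOKAHEAD.
 */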
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:
	return status;
}
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}
static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {
			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * look_aheads in all packets
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:
	/*
	 * Cleanup any packets we allocated but didn't use to
	 * actually fetch any packets.
	 */
	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}
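/*
 * Main RX state machine: ath6kl_htc_rxmsg_pending_handler() loops
 * "allocate buffers from the lookahead(s) -> fetch -> process
 * headers", and keeps looping for as long as header trailers hand
 * back fresh lookaheads, which signal further pending messages.
 */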
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	INIT_LIST_HEAD(&comp_pktq);
	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing.
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {
		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
		endpoint = &target->endpoint[id];

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets indicated by the
		 * look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		if (status != -ECANCELED)
			ath6kl_err("failed to get pending recv messages: %d\n",
				   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}
	*num_pkts = n_fetched;

	return status;
}
/*
 * Synchronously wait for a control message from the target. This
 * function is used at initialization time ONLY. At init, messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return -ENOMEM;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}
static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/*
			 * packets in rx_bufq of endpoint 0 have originally
			 * been queued from target->free_ctrl_rxbuf where
			 * packet and packet->buf_start are allocated
			 * separately using kmalloc(). For other endpoint
			 * rx_bufq, it is allocated as skb where packet is
			 * skb->head. Take care of this difference while freeing
			 * these packets.
			 */
			if (packet->endpoint == ENDPOINT_0) {
				kfree(packet->buf_start);
				kfree(packet);
			} else {
				dev_kfree_skb(packet->pkt_cntxt);
			}
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}
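/*
 * Service connection handshake: the host sends an HTC_MSG_CONN_SVC_ID
 * control message on endpoint 0 and synchronously waits for the
 * HTC_MSG_CONN_SVC_RESP_ID reply, which carries the assigned endpoint
 * and the maximum message size for the service.
 */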
static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
					struct htc_service_connect_req *conn_req,
					struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;
	u16 msg_id;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);
		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);
		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);
		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
		msg_id = le16_to_cpu(resp_msg->msg_id);

		if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
		    (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		/* endpoint already in use */
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_ep = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	switch (endpoint->svc_id) {
	case WMI_DATA_BK_SVC:
		endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
		break;
	default:
		endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
		break;
	}

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override the cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else {
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;
	}

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}

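/*
 * Sketch of a service connect as a caller might issue it (illustrative;
 * the real WMI connects live in the core driver and set more fields,
 * and my_rx_handler and the queue depth here are invented):
 *
 *	struct htc_service_connect_req req;
 *	struct htc_service_connect_resp resp;
 *	int status;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&resp, 0, sizeof(resp));
 *	req.svc_id = WMI_CONTROL_SVC;
 *	req.ep_cb.rx = my_rx_handler;	(hypothetical callback)
 *	req.max_txq_depth = 32;		(hypothetical depth)
 *	status = ath6kl_htc_mbox_conn_service(target, &req, &resp);
 *	if (!status)
 *		use resp.endpoint / resp.len_max for subsequent tx
 */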
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0, sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
					 enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);

	return num;
}

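/*
 * Negotiate message bundling: the target advertised how many HTC
 * messages it can bundle per transfer; clamp that against the host
 * limit and against what the HIF scatter-gather layer supports, then
 * derive the tx/rx bundle byte limits from the scatter request size.
 */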
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		/* tx_bndl_mask is enabled per AC, each has 1 bit */
		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling: since the credit size is not
		 * aligned to a block size, the I/O block padding would
		 * spill into the next credit buffer, which is fatal.
		 */
		target->tx_bndl_mask = 0;
	}
}

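/*
 * Worked example for the alignment check above (numbers are
 * illustrative, not taken from any particular firmware): with an SDIO
 * block size of 128 bytes, a credit size of 1664 satisfies
 * 1664 % 128 == 0, so tx bundling stays enabled; a credit size of 1500
 * leaves 1500 % 128 == 92, so tx_bndl_mask is cleared and sends go out
 * unbundled.
 */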
static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* FIXME: remove once USB support is implemented */
	if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
		ath6kl_err("HTC doesn't support USB yet. Patience!\n");
		return -EOPNOTSUPP;
	}

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);
	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy v2.0 message */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);

	if (status)
		/*
		 * FIXME: this call doesn't make sense, the caller should
		 * call ath6kl_htc_mbox_cleanup() when it wants to remove
		 * htc.
		 */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}

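/*
 * Expected call order, as wired up through ath6kl_htc_mbox_ops at the
 * bottom of this file: create -> wait_target -> conn_service (once per
 * service) -> start, with stop/cleanup on the teardown side.
 */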
/*
 * Start HTC, enable interrupts and let the target know the host has
 * finished setup.
 */
static int ath6kl_htc_mbox_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	memset(&target->dev->irq_proc_reg, 0,
	       sizeof(target->dev->irq_proc_reg));

	/* Disable interrupts at the chip level */
	ath6kl_hif_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
			   target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate the setup completion to the target */
	status = htc_setup_tx_complete(target);
	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kl_hif_unmask_intrs(target->dev);
	if (status)
		ath6kl_htc_mbox_stop(target);

	return status;
}

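/*
 * (Re)build the control buffer pool: each buffer must hold the largest
 * control message plus the HTC header, rounded up so a single I/O block
 * still fits when the block size exceeds the message limit. The first
 * NUM_CONTROL_RX_BUFFERS packets seed the rx free list, the remainder
 * the tx free list.
 */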
static int ath6kl_htc_reset(struct htc_target *target)
{
	u32 block_size, ctrl_bufsz;
	struct htc_packet *packet;
	int i;

	reset_ep_state(target);

	block_size = target->dev->ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			return -ENOMEM;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			return -ENOMEM;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else {
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
		}
	}

	return 0;
}

/* htc_stop: stop interrupt reception, and flush all queued buffers */
static void ath6kl_htc_mbox_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we
	 * can safely flush the queues.
	 */
	ath6kl_hif_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_mbox_flush_rx_buf(target);

	ath6kl_htc_reset(target);
}

static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	int status = 0;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto err_htc_cleanup;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	status = ath6kl_hif_setup(target->dev);
	if (status)
		goto err_htc_cleanup;

	status = ath6kl_htc_reset(target);
	if (status)
		goto err_htc_cleanup;

	return target;

err_htc_cleanup:
	ath6kl_htc_mbox_cleanup(target);

	return NULL;
}

/* cleanup the HTC instance */
static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	/* FIXME: remove check once USB support is implemented */
	if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}

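/*
 * HTC ops vtable for the mailbox (SDIO) transport. The core talks to
 * HTC only through ar->htc_ops, which lets it select this implementation
 * or an alternative transport (e.g. the HTC pipe code used for USB) at
 * attach time.
 */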
static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
	.create = ath6kl_htc_mbox_create,
	.wait_target = ath6kl_htc_mbox_wait_target,
	.start = ath6kl_htc_mbox_start,
	.conn_service = ath6kl_htc_mbox_conn_service,
	.tx = ath6kl_htc_mbox_tx,
	.stop = ath6kl_htc_mbox_stop,
	.cleanup = ath6kl_htc_mbox_cleanup,
	.flush_txep = ath6kl_htc_mbox_flush_txep,
	.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
	.activity_changed = ath6kl_htc_mbox_activity_changed,
	.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_mbox_credit_setup,
};

void ath6kl_htc_mbox_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_mbox_ops;
}
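
/*
 * Usage sketch (illustrative, not the literal core-driver code): a bus
 * probe path would select this transport before creating the HTC
 * instance, roughly:
 *
 *	ath6kl_htc_mbox_attach(ar);
 *	ar->htc_target = ar->htc_ops->create(ar);
 *	if (!ar->htc_target)
 *		goto err;
 */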