ath6kl: Avoid rolling back of entire scatter setup in case of failure
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / ath / ath6kl / htc.c
1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16
17 #include "core.h"
18 #include "htc_hif.h"
19 #include "debug.h"
20 #include "hif-ops.h"
21 #include <asm/unaligned.h>
22
23 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
24
25 static void ath6kl_htc_buf_align(u8 **buf, unsigned long len)
26 {
27         u8 *align_addr;
28
29         if (!IS_ALIGNED((unsigned long) *buf, 4)) {
30                 align_addr = PTR_ALIGN(*buf - 4, 4);
31                 memmove(align_addr, *buf, len);
32                 *buf = align_addr;
33         }
34 }
35
36 static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
37                               int ctrl1)
38 {
39         struct htc_frame_hdr *hdr;
40
41         packet->buf -= HTC_HDR_LENGTH;
42         hdr =  (struct htc_frame_hdr *)packet->buf;
43
44         /* Endianess? */
45         put_unaligned((u16)packet->act_len, &hdr->payld_len);
46         hdr->flags = flags;
47         hdr->eid = packet->endpoint;
48         hdr->ctrl[0] = ctrl0;
49         hdr->ctrl[1] = ctrl1;
50 }
51
/*
 * Return a TX control packet to the target's free_ctrl_txbuf pool.
 * The pool is shared across contexts, hence htc_lock.
 */
static void htc_reclaim_txctrl_buf(struct htc_target *target,
                                   struct htc_packet *pkt)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
        spin_unlock_bh(&target->htc_lock);
}
59
60 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
61                                               bool tx)
62 {
63         struct htc_packet *packet = NULL;
64         struct list_head *buf_list;
65
66         buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
67
68         spin_lock_bh(&target->htc_lock);
69
70         if (list_empty(buf_list)) {
71                 spin_unlock_bh(&target->htc_lock);
72                 return NULL;
73         }
74
75         packet = list_first_entry(buf_list, struct htc_packet, list);
76         list_del(&packet->list);
77         spin_unlock_bh(&target->htc_lock);
78
79         if (tx)
80                 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
81
82         return packet;
83 }
84
/*
 * Per-packet TX completion bookkeeping: strip the HTC header back off
 * the buffer and, if the send failed, return the credits this packet
 * consumed to the distribution pool and re-run credit distribution.
 */
static void htc_tx_comp_update(struct htc_target *target,
                               struct htc_endpoint *endpoint,
                               struct htc_packet *packet)
{
        packet->completion = NULL;
        /* undo the header reservation made by htc_prep_send_pkt() */
        packet->buf += HTC_HDR_LENGTH;

        if (!packet->status)
                return;

        ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
                   packet->status, packet->endpoint, packet->act_len,
                   packet->info.tx.cred_used);

        /* on failure to submit, reclaim credits for this packet */
        spin_lock_bh(&target->tx_lock);
        endpoint->cred_dist.cred_to_dist +=
                                packet->info.tx.cred_used;
        endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                   target->cred_dist_cntxt, &target->cred_dist_list);

        /* hand the reclaimed credits back out to the endpoints */
        ath6k_credit_distribute(target->cred_dist_cntxt,
                                &target->cred_dist_list,
                                HTC_CREDIT_DIST_SEND_COMPLETE);

        spin_unlock_bh(&target->tx_lock);
}
114
/*
 * Indicate a list of completed TX packets to the upper layer.
 * No-op when @txq is empty.
 */
static void htc_tx_complete(struct htc_endpoint *endpoint,
                            struct list_head *txq)
{
        if (list_empty(txq))
                return;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "send complete ep %d, (%d pkts)\n",
                   endpoint->eid, get_queue_depth(txq));

        /* the core layer consumes (and frees) the packets on txq */
        ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}
127
128 static void htc_tx_comp_handler(struct htc_target *target,
129                                 struct htc_packet *packet)
130 {
131         struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
132         struct list_head container;
133
134         htc_tx_comp_update(target, endpoint, packet);
135         INIT_LIST_HEAD(&container);
136         list_add_tail(&packet->list, &container);
137         /* do completion */
138         htc_tx_complete(endpoint, &container);
139 }
140
141 static void htc_async_tx_scat_complete(struct htc_target *target,
142                                        struct hif_scatter_req *scat_req)
143 {
144         struct htc_endpoint *endpoint;
145         struct htc_packet *packet;
146         struct list_head tx_compq;
147         int i;
148
149         INIT_LIST_HEAD(&tx_compq);
150
151         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
152                 "htc_async_tx_scat_complete  total len: %d  entries: %d\n",
153                 scat_req->len, scat_req->scat_entries);
154
155         if (scat_req->status)
156                 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
157
158         packet = scat_req->scat_list[0].packet;
159         endpoint = &target->endpoint[packet->endpoint];
160
161         /* walk through the scatter list and process */
162         for (i = 0; i < scat_req->scat_entries; i++) {
163                 packet = scat_req->scat_list[i].packet;
164                 if (!packet) {
165                         WARN_ON(1);
166                         return;
167                 }
168
169                 packet->status = scat_req->status;
170                 htc_tx_comp_update(target, endpoint, packet);
171                 list_add_tail(&packet->list, &tx_compq);
172         }
173
174         /* free scatter request */
175         hif_scatter_req_add(target->dev->ar, scat_req);
176
177         /* complete all packets */
178         htc_tx_complete(endpoint, &tx_compq);
179 }
180
181 static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
182 {
183         int status;
184         bool sync = false;
185         u32 padded_len, send_len;
186
187         if (!packet->completion)
188                 sync = true;
189
190         send_len = packet->act_len + HTC_HDR_LENGTH;
191
192         ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
193                    __func__, send_len, sync ? "sync" : "async");
194
195         padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
196
197         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
198                 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
199                 padded_len,
200                 target->dev->ar->mbox_info.htc_addr,
201                 sync ? "sync" : "async");
202
203         if (sync) {
204                 status = hif_read_write_sync(target->dev->ar,
205                                 target->dev->ar->mbox_info.htc_addr,
206                                  packet->buf, padded_len,
207                                  HIF_WR_SYNC_BLOCK_INC);
208
209                 packet->status = status;
210                 packet->buf += HTC_HDR_LENGTH;
211         } else
212                 status = hif_write_async(target->dev->ar,
213                                 target->dev->ar->mbox_info.htc_addr,
214                                 packet->buf, padded_len,
215                                 HIF_WR_ASYNC_BLOCK_INC, packet);
216
217         return status;
218 }
219
/*
 * Reserve TX credits for a message of padded length @len on @eid.
 *
 * *req_cred is set to the number of credits needed.  If the endpoint
 * is short, more credits are sought from the distribution context;
 * failing that, -EINVAL is returned and nothing is debited (the packet
 * stays queued).  On success the credits are debited and, when the
 * endpoint drops below cred_per_msg, *flags gets
 * HTC_FLAGS_NEED_CREDIT_UPDATE so the target reports credits back.
 *
 * Called with target->tx_lock held (via htc_tx_pkts_get()).
 */
static int htc_check_credits(struct htc_target *target,
                             struct htc_endpoint *ep, u8 *flags,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
{

        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
                   *req_cred, ep->cred_dist.credits);

        if (ep->cred_dist.credits < *req_cred) {
                /* the control endpoint never waits for credits */
                if (eid == ENDPOINT_0)
                        return -EINVAL;

                /* Seek more credits */
                ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                ep->cred_dist.seek_cred = 0;

                if (ep->cred_dist.credits < *req_cred) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "not enough credits for ep %d - leaving packet in queue\n",
                                   eid);
                        return -EINVAL;
                }
        }

        ep->cred_dist.credits -= *req_cred;
        /* "cred_cosumd" [sic] - stats field name as declared elsewhere */
        ep->ep_st.cred_cosumd += *req_cred;

         /* When we are getting low on credits, ask for more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                ep->cred_dist.seek_cred =
                ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                /* see if we were successful in getting more */
                if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                        /* tell the target we need credits ASAP! */
                        *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                        ep->ep_st.cred_low_indicate += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
                }
        }

        return 0;
}
278
/*
 * Move sendable packets from the endpoint's txq onto the caller's
 * @queue, reserving credits for each and stamping completion callback,
 * credits used, send flags and sequence number.  Stops at the first
 * packet for which credits cannot be obtained.
 *
 * Called with target->tx_lock held (see htc_tx_from_ep_txq()).
 */
static void htc_tx_pkts_get(struct htc_target *target,
                            struct htc_endpoint *endpoint,
                            struct list_head *queue)
{
        int req_cred;
        u8 flags;
        struct htc_packet *packet;
        unsigned int len;

        while (true) {

                flags = 0;

                if (list_empty(&endpoint->txq))
                        break;
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                        "got head pkt:0x%p , queue depth: %d\n",
                        packet, get_queue_depth(&endpoint->txq));

                /* bus-padded length including the HTC header */
                len = CALC_TXRX_PADDED_LEN(target,
                                           packet->act_len + HTC_HDR_LENGTH);

                if (htc_check_credits(target, endpoint, &flags,
                                      packet->endpoint, len, &req_cred))
                        break;

                /* now we can fully move onto caller's queue */
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);
                list_move_tail(&packet->list, queue);

                /* save the number of credits this packet consumed */
                packet->info.tx.cred_used = req_cred;

                /* all TX packets are handled asynchronously */
                packet->completion = htc_tx_comp_handler;
                packet->context = target;
                endpoint->ep_st.tx_issued += 1;

                /* save send flags */
                packet->info.tx.flags = flags;
                packet->info.tx.seqno = endpoint->seqno;
                endpoint->seqno++;
        }
}
327
/* See if the padded tx length falls on a credit boundary */
/*
 * Returns 0 when no padding is needed, the number of dummy pad bytes
 * added to *len when padding was applied, or -1 when the packet must
 * be sent outside a bundle (padding disabled for this endpoint, or the
 * required pad would exceed 255 bytes).
 */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
                                  struct htc_endpoint *ep)
{
        int rem_cred, cred_pad;

        rem_cred = *len % cred_sz;

        /* No padding needed */
        if  (!rem_cred)
                return 0;

        if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
                return -1;

        /*
         * The transfer consumes a "partial" credit, this
         * packet cannot be bundled unless we add
         * additional "dummy" padding (max 255 bytes) to
         * consume the entire credit.
         */
        /*
         * NOTE(review): for *len > cred_sz this pads by rem_cred rather
         * than (cred_sz - rem_cred), which does not land *len on a
         * credit boundary — confirm intended behavior against the
         * target firmware's credit accounting.
         */
        cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

        if ((cred_pad > 0) && (cred_pad <= 255))
                *len += cred_pad;
        else
                /* The amount of padding is too large, send as non-bundled */
                return -1;

        return cred_pad;
}
359
/*
 * Pull up to @n_scat packets off @queue and populate @scat_req for a
 * bundled send.
 *
 * Each packet is HTC-header-prepped, flagged HTC_FLAGS_SEND_BUNDLE,
 * credit-padded and 4-byte aligned before entering the scatter list.
 * Gathering stops when the bundle space (target->max_tx_bndl_sz) runs
 * out or a packet cannot be credit-padded (-ENOSPC recorded).
 *
 * If fewer than HTC_MIN_HTC_MSGS_TO_BUNDLE packets were gathered, the
 * partial setup is rolled back — each packet's header reservation is
 * undone and it is pushed back onto the head of @queue — and -EAGAIN
 * tells the caller to fall back to non-bundled sends.
 */
static int htc_setup_send_scat_list(struct htc_target *target,
                                    struct htc_endpoint *endpoint,
                                    struct hif_scatter_req *scat_req,
                                    int n_scat,
                                    struct list_head *queue)
{
        struct htc_packet *packet;
        int i, len, rem_scat, cred_pad;
        int status = 0;

        rem_scat = target->max_tx_bndl_sz;

        for (i = 0; i < n_scat; i++) {
                scat_req->scat_list[i].packet = NULL;

                if (list_empty(queue))
                        break;

                packet = list_first_entry(queue, struct htc_packet, list);
                len = CALC_TXRX_PADDED_LEN(target,
                                           packet->act_len + HTC_HDR_LENGTH);

                cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
                                                  &len, endpoint);
                if (cred_pad < 0 || rem_scat < len) {
                        status = -ENOSPC;
                        break;
                }

                rem_scat -= len;
                /* now remove it from the queue */
                packet = list_first_entry(queue, struct htc_packet, list);
                list_del(&packet->list);

                scat_req->scat_list[i].packet = packet;
                /* prepare packet and flag message as part of a send bundle */
                htc_prep_send_pkt(packet,
                                packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
                                cred_pad, packet->info.tx.seqno);
                /* Make sure the buffer is 4-byte aligned */
                ath6kl_htc_buf_align(&packet->buf,
                                     packet->act_len + HTC_HDR_LENGTH);
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = len;

                scat_req->len += len;
                scat_req->scat_entries++;
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
                           i, packet, len, rem_scat);
        }

        /* Roll back scatter setup in case of any failure */
        if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
                for (i = scat_req->scat_entries - 1; i >= 0; i--) {
                        packet = scat_req->scat_list[i].packet;
                        if (packet) {
                                /* undo htc_prep_send_pkt() reservation */
                                packet->buf += HTC_HDR_LENGTH;
                                list_add(&packet->list, queue);
                        }
                }
                return -EAGAIN;
        }

        return status;
}
426
427 /*
428  * htc_issue_send_bundle: drain a queue and send as bundles
429  * this function may return without fully draining the queue
430  * when
431  *
432  *    1. scatter resources are exhausted
433  *    2. a message that will consume a partial credit will stop the
434  *    bundling process early
435  *    3. we drop below the minimum number of messages for a bundle
436  */
static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
                                  struct list_head *queue,
                                  int *sent_bundle, int *n_bundle_pkts)
{
        struct htc_target *target = endpoint->target;
        struct hif_scatter_req *scat_req = NULL;
        int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
        int status;

        while (true) {
                status = 0;
                n_scat = get_queue_depth(queue);
                n_scat = min(n_scat, target->msg_per_bndl_max);

                if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
                        /* not enough to bundle */
                        break;

                scat_req = hif_scatter_req_get(target->dev->ar);

                if (!scat_req) {
                        /* no scatter resources  */
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                "no more scatter resources\n");
                        break;
                }

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
                           n_scat);

                scat_req->len = 0;
                scat_req->scat_entries = 0;

                status = htc_setup_send_scat_list(target, endpoint,
                                                  scat_req, n_scat, queue);
                if (status == -EAGAIN) {
                        /* too few packets gathered; hand the req back */
                        hif_scatter_req_add(target->dev->ar, scat_req);
                        break;
                }

                /* send path is always asynchronous */
                scat_req->complete = htc_async_tx_scat_complete;
                n_sent_bundle++;
                tot_pkts_bundle += scat_req->scat_entries;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "send scatter total bytes: %d , entries: %d\n",
                           scat_req->len, scat_req->scat_entries);
                /*
                 * NOTE(review): the submit return value is ignored —
                 * presumably failures are reported through the
                 * scat_req->complete callback; confirm.
                 */
                ath6kldev_submit_scat_req(target->dev, scat_req, false);

                /*
                 * Non-zero status here is -ENOSPC from list setup: the
                 * partial bundle was still submitted, but stop bundling.
                 */
                if (status)
                        break;
        }

        *sent_bundle = n_sent_bundle;
        *n_bundle_pkts = tot_pkts_bundle;
        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
                   n_sent_bundle);

        return;
}
498
/*
 * Drain the endpoint's TX queue, sending bundles where possible and
 * single packets otherwise.
 *
 * tx_proc_cnt serializes drainers: if another context is already
 * draining this endpoint, just undo the count bump and return — the
 * active drainer will pick up newly queued packets on its next pass.
 * target->tx_lock is dropped around the actual bus transfers.
 */
static void htc_tx_from_ep_txq(struct htc_target *target,
                               struct htc_endpoint *endpoint)
{
        struct list_head txq;
        struct htc_packet *packet;
        int bundle_sent;
        int n_pkts_bundle;

        spin_lock_bh(&target->tx_lock);

        endpoint->tx_proc_cnt++;
        if (endpoint->tx_proc_cnt > 1) {
                endpoint->tx_proc_cnt--;
                spin_unlock_bh(&target->tx_lock);
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
                return;
        }

        /*
         * drain the endpoint TX queue for transmission as long
         * as we have enough credits.
         */
        INIT_LIST_HEAD(&txq);

        while (true) {

                if (list_empty(&endpoint->txq))
                        break;

                /* reserve credits and collect sendable packets (locked) */
                htc_tx_pkts_get(target, endpoint, &txq);

                if (list_empty(&txq))
                        break;

                spin_unlock_bh(&target->tx_lock);

                bundle_sent = 0;
                n_pkts_bundle = 0;

                while (true) {
                        /* try to send a bundle on each pass */
                        if ((target->tx_bndl_enable) &&
                            (get_queue_depth(&txq) >=
                            HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                                int temp1 = 0, temp2 = 0;

                                htc_issue_send_bundle(endpoint, &txq,
                                                      &temp1, &temp2);
                                bundle_sent += temp1;
                                n_pkts_bundle += temp2;
                        }

                        if (list_empty(&txq))
                                break;

                        /* whatever could not be bundled goes out singly */
                        packet = list_first_entry(&txq, struct htc_packet,
                                                  list);
                        list_del(&packet->list);

                        htc_prep_send_pkt(packet, packet->info.tx.flags,
                                          0, packet->info.tx.seqno);
                        htc_issue_send(target, packet);
                }

                spin_lock_bh(&target->tx_lock);

                endpoint->ep_st.tx_bundles += bundle_sent;
                endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
        }

        endpoint->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
}
572
573 static bool htc_try_send(struct htc_target *target,
574                          struct htc_endpoint *endpoint,
575                          struct htc_packet *tx_pkt)
576 {
577         struct htc_ep_callbacks ep_cb;
578         int txq_depth;
579         bool overflow = false;
580
581         ep_cb = endpoint->ep_cb;
582
583         spin_lock_bh(&target->tx_lock);
584         txq_depth = get_queue_depth(&endpoint->txq);
585         spin_unlock_bh(&target->tx_lock);
586
587         if (txq_depth >= endpoint->max_txq_depth)
588                 overflow = true;
589
590         if (overflow)
591                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
592                            "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
593                            endpoint->eid, overflow, txq_depth,
594                            endpoint->max_txq_depth);
595
596         if (overflow && ep_cb.tx_full) {
597                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
598                            "indicating overflowed tx packet: 0x%p\n", tx_pkt);
599
600                 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
601                     HTC_SEND_FULL_DROP) {
602                         endpoint->ep_st.tx_dropped += 1;
603                         return false;
604                 }
605         }
606
607         spin_lock_bh(&target->tx_lock);
608         list_add_tail(&tx_pkt->list, &endpoint->txq);
609         spin_unlock_bh(&target->tx_lock);
610
611         htc_tx_from_ep_txq(target, endpoint);
612
613         return true;
614 }
615
/*
 * Kick any endpoint TX queues that stalled waiting for credits,
 * walking endpoints in credit-distribution (priority) order.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_endpoint_credit_dist *cred_dist;

        /*
         * Run through the credit distribution list to see if there are
         * packets queued. NOTE: no locks need to be taken since the
         * distribution list is not dynamic (cannot be re-ordered) and we
         * are not modifying any state.
         */
        list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
                endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

                spin_lock_bh(&target->tx_lock);
                if (!list_empty(&endpoint->txq)) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "ep %d has %d credits and %d packets in tx queue\n",
                                   cred_dist->endpoint,
                                   endpoint->cred_dist.credits,
                                   get_queue_depth(&endpoint->txq));
                        /* drop the lock: htc_tx_from_ep_txq() takes it */
                        spin_unlock_bh(&target->tx_lock);
                        /*
                         * Try to start the stalled queue, this list is
                         * ordered by priority. If there are credits
                         * available the highest priority queue will get a
                         * chance to reclaim credits from lower priority
                         * ones.
                         */
                        htc_tx_from_ep_txq(target, endpoint);
                        spin_lock_bh(&target->tx_lock);
                }
                spin_unlock_bh(&target->tx_lock);
        }
}
651
652 static int htc_setup_tx_complete(struct htc_target *target)
653 {
654         struct htc_packet *send_pkt = NULL;
655         int status;
656
657         send_pkt = htc_get_control_buf(target, true);
658
659         if (!send_pkt)
660                 return -ENOMEM;
661
662         if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
663                 struct htc_setup_comp_ext_msg *setup_comp_ext;
664                 u32 flags = 0;
665
666                 setup_comp_ext =
667                     (struct htc_setup_comp_ext_msg *)send_pkt->buf;
668                 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
669                 setup_comp_ext->msg_id =
670                         cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
671
672                 if (target->msg_per_bndl_max > 0) {
673                         /* Indicate HTC bundling to the target */
674                         flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
675                         setup_comp_ext->msg_per_rxbndl =
676                                                 target->msg_per_bndl_max;
677                 }
678
679                 memcpy(&setup_comp_ext->flags, &flags,
680                        sizeof(setup_comp_ext->flags));
681                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
682                                        sizeof(struct htc_setup_comp_ext_msg),
683                                        ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
684
685         } else {
686                 struct htc_setup_comp_msg *setup_comp;
687                 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
688                 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
689                 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
690                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
691                                        sizeof(struct htc_setup_comp_msg),
692                                        ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
693         }
694
695         /* we want synchronous operation */
696         send_pkt->completion = NULL;
697         htc_prep_send_pkt(send_pkt, 0, 0, 0);
698         status = htc_issue_send(target, send_pkt);
699
700         if (send_pkt != NULL)
701                 htc_reclaim_txctrl_buf(target, send_pkt);
702
703         return status;
704 }
705
/*
 * Build the credit distribution list in service priority order.
 *
 * ENDPOINT_0 (control) always heads the list; remaining endpoints are
 * appended in the order their service IDs appear in srvc_pri_order[].
 * A service ID with no matching endpoint trips a WARN and aborts
 * further list construction.
 */
void ath6kl_htc_set_credit_dist(struct htc_target *target,
                                struct htc_credit_state_info *cred_dist_cntxt,
                                u16 srvc_pri_order[], int list_len)
{
        struct htc_endpoint *endpoint;
        int i, ep;

        target->cred_dist_cntxt = cred_dist_cntxt;

        list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
                      &target->cred_dist_list);

        for (i = 0; i < list_len; i++) {
                for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
                        endpoint = &target->endpoint[ep];
                        if (endpoint->svc_id == srvc_pri_order[i]) {
                                list_add_tail(&endpoint->cred_dist.list,
                                              &target->cred_dist_list);
                                break;
                        }
                }
                /* ep == ENDPOINT_MAX means the service ID was not found */
                if (ep >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        return;
                }
        }
}
733
734 int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
735 {
736         struct htc_endpoint *endpoint;
737         struct list_head queue;
738
739         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
740                    "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
741                    packet->endpoint, packet->buf, packet->act_len);
742
743         if (packet->endpoint >= ENDPOINT_MAX) {
744                 WARN_ON(1);
745                 return -EINVAL;
746         }
747
748         endpoint = &target->endpoint[packet->endpoint];
749
750         if (!htc_try_send(target, endpoint, packet)) {
751                 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
752                                  -ECANCELED : -ENOSPC;
753                 INIT_LIST_HEAD(&queue);
754                 list_add(&packet->list, &queue);
755                 htc_tx_complete(endpoint, &queue);
756         }
757
758         return 0;
759 }
760
761 /* flush endpoint TX queue */
/*
 * Cancel queued TX packets on @eid whose tag matches @tag (or every
 * packet when tag is HTC_TX_PACKET_TAG_ALL).  Matching packets are
 * moved to a private discard queue under tx_lock, then completed one
 * at a time with status -ECANCELED outside the lock.
 */
void ath6kl_htc_flush_txep(struct htc_target *target,
                           enum htc_endpoint_id eid, u16 tag)
{
        struct htc_packet *packet, *tmp_pkt;
        struct list_head discard_q, container;
        struct htc_endpoint *endpoint = &target->endpoint[eid];

        if (!endpoint->svc_id) {
                /* endpoint was never connected to a service */
                WARN_ON(1);
                return;
        }

        /* initialize the discard queue */
        INIT_LIST_HEAD(&discard_q);

        spin_lock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
                if ((tag == HTC_TX_PACKET_TAG_ALL) ||
                    (tag == packet->info.tx.tag))
                        list_move_tail(&packet->list, &discard_q);
        }

        spin_unlock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
                ath6kl_dbg(ATH6KL_DBG_TRC,
                        "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
                        packet, packet->act_len,
                        packet->endpoint, packet->info.tx.tag);

                INIT_LIST_HEAD(&container);
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }

}
801
802 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
803 {
804         struct htc_endpoint *endpoint;
805         int i;
806
807         dump_cred_dist_stats(target);
808
809         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
810                 endpoint = &target->endpoint[i];
811                 if (endpoint->svc_id == 0)
812                         /* not in use.. */
813                         continue;
814                 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
815         }
816 }
817
818 void ath6kl_htc_indicate_activity_change(struct htc_target *target,
819                                          enum htc_endpoint_id eid, bool active)
820 {
821         struct htc_endpoint *endpoint = &target->endpoint[eid];
822         bool dist = false;
823
824         if (endpoint->svc_id == 0) {
825                 WARN_ON(1);
826                 return;
827         }
828
829         spin_lock_bh(&target->tx_lock);
830
831         if (active) {
832                 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
833                         endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
834                         dist = true;
835                 }
836         } else {
837                 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
838                         endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
839                         dist = true;
840                 }
841         }
842
843         if (dist) {
844                 endpoint->cred_dist.txq_depth =
845                         get_queue_depth(&endpoint->txq);
846
847                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
848                            target->cred_dist_cntxt, &target->cred_dist_list);
849
850                 ath6k_credit_distribute(target->cred_dist_cntxt,
851                                         &target->cred_dist_list,
852                                         HTC_CREDIT_DIST_ACTIVITY_CHANGE);
853         }
854
855         spin_unlock_bh(&target->tx_lock);
856
857         if (dist && !active)
858                 htc_chk_ep_txq(target);
859 }
860
861 /* HTC Rx */
862
863 static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
864                                        int n_look_ahds)
865 {
866         endpoint->ep_st.rx_pkts++;
867         if (n_look_ahds == 1)
868                 endpoint->ep_st.rx_lkahds++;
869         else if (n_look_ahds > 1)
870                 endpoint->ep_st.rx_bundle_lkahd++;
871 }
872
873 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
874                                           enum htc_endpoint_id eid, int len)
875 {
876         return (eid == target->dev->ar->ctrl_ep) ?
877                 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
878 }
879
880 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
881 {
882         struct list_head queue;
883
884         INIT_LIST_HEAD(&queue);
885         list_add_tail(&packet->list, &queue);
886         return ath6kl_htc_add_rxbuf_multiple(target, &queue);
887 }
888
889 static void htc_reclaim_rxbuf(struct htc_target *target,
890                               struct htc_packet *packet,
891                               struct htc_endpoint *ep)
892 {
893         if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
894                 htc_rxpkt_reset(packet);
895                 packet->status = -ECANCELED;
896                 ep->ep_cb.rx(ep->target, packet);
897         } else {
898                 htc_rxpkt_reset(packet);
899                 htc_add_rxbuf((void *)(target), packet);
900         }
901 }
902
903 static void reclaim_rx_ctrl_buf(struct htc_target *target,
904                                 struct htc_packet *packet)
905 {
906         spin_lock_bh(&target->htc_lock);
907         list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
908         spin_unlock_bh(&target->htc_lock);
909 }
910
911 static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
912                       u32 rx_len)
913 {
914         struct ath6kl_device *dev = target->dev;
915         u32 padded_len;
916         int status;
917
918         padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
919
920         if (padded_len > packet->buf_len) {
921                 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
922                            padded_len, rx_len, packet->buf_len);
923                 return -ENOMEM;
924         }
925
926         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
927                    "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
928                    packet, packet->info.rx.exp_hdr,
929                    padded_len, dev->ar->mbox_info.htc_addr, "sync");
930
931         status = hif_read_write_sync(dev->ar,
932                                      dev->ar->mbox_info.htc_addr,
933                                      packet->buf, padded_len,
934                                      HIF_RD_SYNC_BLOCK_FIX);
935
936         packet->status = status;
937
938         return status;
939 }
940
941 /*
942  * optimization for recv packets, we can indicate a
943  * "hint" that there are more  single-packets to fetch
944  * on this endpoint.
945  */
946 static void set_rxpkt_indication_flag(u32 lk_ahd,
947                                       struct htc_endpoint *endpoint,
948                                       struct htc_packet *packet)
949 {
950         struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
951
952         if (htc_hdr->eid == packet->endpoint) {
953                 if (!list_empty(&endpoint->rx_bufq))
954                         packet->info.rx.indicat_flags |=
955                                         HTC_RX_FLAGS_INDICATE_MORE_PKTS;
956         }
957 }
958
959 static void chk_rx_water_mark(struct htc_endpoint *endpoint)
960 {
961         struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
962
963         if (ep_cb.rx_refill_thresh > 0) {
964                 spin_lock_bh(&endpoint->target->rx_lock);
965                 if (get_queue_depth(&endpoint->rx_bufq)
966                     < ep_cb.rx_refill_thresh) {
967                         spin_unlock_bh(&endpoint->target->rx_lock);
968                         ep_cb.rx_refill(endpoint->target, endpoint->eid);
969                         return;
970                 }
971                 spin_unlock_bh(&endpoint->target->rx_lock);
972         }
973 }
974
/*
 * Allocate RX packet buffers for up to n_msg incoming messages (all
 * described by the same look-ahead header) and queue them on 'queue'.
 *
 * This function is called with rx_lock held; it temporarily drops and
 * re-acquires the lock around the endpoint callbacks (rx_allocthresh /
 * rx_refill), which may sleep or allocate.
 *
 * Returns 0 on success, -EINVAL for an oversized frame, -ENOSPC when no
 * buffer could be obtained (packets queued so far remain on 'queue' for
 * the caller), or -ECANCELED if the target is stopping.
 */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
			    u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	/* full transfer size: payload + HTC header, padded to block size */
	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* drop the lock for the allocation callback */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			/* take the next recycled buffer, if any appeared */
			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* out of buffers: record which endpoint is waiting */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/*
			 * Follow-on packets in a bundle: the real header is
			 * unknown until the bundle is actually fetched, so
			 * mark it for refresh and poison the expected header.
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}
1076
/*
 * Validate each of the 'msg' look-ahead headers and allocate RX packets
 * for the messages (or bundles) they announce, queueing them on 'queue'.
 *
 * -ENOSPC from htc_setup_rxpkts (buffers exhausted) is deliberately
 * converted to success so the caller can receive partial data with the
 * buffers already queued.  On any other error, everything queued so far
 * is reclaimed before returning.
 */
static int alloc_and_prep_rxpkts(struct htc_target *target,
				 u32 lk_ahds[], int msg,
				 struct htc_endpoint *endpoint,
				 struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		/* sanity-check the endpoint advertised by the look-ahead */
		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
					  n_msg);

		/*
		 * This is due to unavailabilty of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	/* on failure, recycle everything queued so far */
	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
1175
/*
 * RX completion handler for the HTC control endpoint (endpoint 0).
 * No unsolicited non-empty control messages are expected here, so any
 * payload is logged and dumped.  The buffer is always reclaimed.
 */
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		/* packet was flushed: return it to the control free pool */
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
			     "Unexpected ENDPOINT 0 Message",
			     packets->buf - HTC_HDR_LENGTH,
			     packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
1200
/*
 * Process a credit report (trailer record): credit the endpoints named
 * in each entry, update their statistics, optionally run the credit
 * distribution function (with tx_lock held), and finally kick the TX
 * queues if any credits came back.
 */
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			/* other endpoints get theirs via the distributor */
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operations note, this is done with the lock held
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}
1287
/*
 * Dispatch a single trailer record by rec_id: credit reports are applied
 * immediately via htc_proc_cred_rpt(); look-ahead reports (single or
 * bundled) are copied into next_lk_ahds with *n_lk_ahds updated.
 * Unknown record types are logged and skipped.
 *
 * Returns 0 on success or -EINVAL for a malformed record length.
 */
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		/*
		 * pre_valid must be the bitwise complement of post_valid
		 * for the look-ahead to be trustworthy.
		 */
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					record_buf, record->len);

			/* one look-ahead per bundled message */
			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}
1368
/*
 * Walk the trailer area at 'buf' ('len' bytes), parsing each
 * byte-aligned htc_record_hdr plus payload in turn via
 * htc_parse_trailer().  Returns 0 on success, or -ENOMEM / the parser's
 * error for a truncated or invalid trailer (the whole trailer is then
 * dumped for diagnosis).
 */
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);

	/* remember the full trailer in case we need to dump it on error */
	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		/* the record payload must fit in what remains */
		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
				orig_buf, orig_len);

	return status;
}
1423
/*
 * Validate the HTC header of a received packet against the expected
 * look-ahead, process any trailer records it carries, then strip both
 * header and trailer so packet->buf / packet->act_len describe only the
 * application payload.  Returns 0 on success or -ENOMEM on any header /
 * trailer validation failure (the bad packet is dumped).
 */
static int htc_proc_rxhdr(struct htc_target *target,
			  struct htc_packet *packet,
			  u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
			packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	/* first 4 bytes of the frame double as the look-ahead value */
	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed  */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	/* the header must match what the look-ahead promised */
	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				&packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				(u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		/* ctrl[0] holds the trailer length, at the tail of payload */
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					   n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		/* exclude the trailer from the application payload */
		packet->act_len -= htc_hdr->ctrl[0];
	}

	/* strip the HTC header */
	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg",
					packet->buf, packet->act_len);
	}

	return status;
}
1530
1531 static void do_rx_completion(struct htc_endpoint *endpoint,
1532                              struct htc_packet *packet)
1533 {
1534                 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1535                            "htc calling ep %d recv callback on packet 0x%p\n",
1536                            endpoint->eid, packet);
1537                 endpoint->ep_cb.rx(endpoint->target, packet);
1538 }
1539
/*
 * Pull a bundle of waiting RX packets from the target in one HIF
 * scatter/gather transfer.
 *
 * Up to target->msg_per_bndl_max packets are taken from @rxq, subject
 * to the max RX bundle byte budget; each packet queued into the
 * scatter request is moved onto @sync_compq for header processing by
 * the caller.  On a successful transfer *n_pkt_fetched is set to the
 * number of packets fetched.
 *
 * Returns 0 on success.  Note that failure to obtain a scatter
 * request also returns 0 with *n_pkt_fetched untouched; the caller
 * then falls back to fetching packets one at a time.
 */
static int htc_issue_rxpkt_bundle(struct htc_target *target,
                                  struct list_head *rxq,
                                  struct list_head *sync_compq,
                                  int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
                            get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
                get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        /* no scatter resource available: return 0 so the caller falls
         * back to the single-packet fetch path */
        if (scat_req == NULL)
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                /* transfer length must be padded to the block size */
                pad_len = CALC_TXRX_PADDED_LEN(target,
                                                   packet->act_len);

                if ((rem_space - pad_len) < 0) {
                        /* bundle byte budget exhausted; put the packet
                         * back and stop filling the scatter list */
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packet 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle the last packet
                         * however can have it's lookahead used
                         */
                        packet->info.rx.rx_flags |=
                            HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                /* queue for header processing after the transfer completes */
                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        /* synchronous scatter read from the mailbox */
        status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

        return status;
}
1632
1633 static int htc_proc_fetched_rxpkts(struct htc_target *target,
1634                                    struct list_head *comp_pktq, u32 lk_ahds[],
1635                                    int *n_lk_ahd)
1636 {
1637         struct htc_packet *packet, *tmp_pkt;
1638         struct htc_endpoint *ep;
1639         int status = 0;
1640
1641         list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1642                 list_del(&packet->list);
1643                 ep = &target->endpoint[packet->endpoint];
1644
1645                 /* process header for each of the recv packet */
1646                 status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
1647                 if (status)
1648                         return status;
1649
1650                 if (list_empty(comp_pktq)) {
1651                         /*
1652                          * Last packet's more packet flag is set
1653                          * based on the lookahead.
1654                          */
1655                         if (*n_lk_ahd > 0)
1656                                 set_rxpkt_indication_flag(lk_ahds[0],
1657                                                           ep, packet);
1658                 } else
1659                         /*
1660                          * Packets in a bundle automatically have
1661                          * this flag set.
1662                          */
1663                         packet->info.rx.indicat_flags |=
1664                                 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1665
1666                 htc_update_rx_stats(ep, *n_lk_ahd);
1667
1668                 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
1669                         ep->ep_st.rx_bundl += 1;
1670
1671                 do_rx_completion(ep, packet);
1672         }
1673
1674         return status;
1675 }
1676
1677 static int htc_fetch_rxpkts(struct htc_target *target,
1678                             struct list_head *rx_pktq,
1679                             struct list_head *comp_pktq)
1680 {
1681         int fetched_pkts;
1682         bool part_bundle = false;
1683         int status = 0;
1684
1685         /* now go fetch the list of HTC packets */
1686         while (!list_empty(rx_pktq)) {
1687                 fetched_pkts = 0;
1688
1689                 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1690                         /*
1691                          * There are enough packets to attempt a
1692                          * bundle transfer and recv bundling is
1693                          * allowed.
1694                          */
1695                         status = htc_issue_rxpkt_bundle(target, rx_pktq,
1696                                                         comp_pktq,
1697                                                         &fetched_pkts,
1698                                                         part_bundle);
1699                         if (status)
1700                                 return status;
1701
1702                         if (!list_empty(rx_pktq))
1703                                 part_bundle = true;
1704                 }
1705
1706                 if (!fetched_pkts) {
1707                         struct htc_packet *packet;
1708
1709                         packet = list_first_entry(rx_pktq, struct htc_packet,
1710                                                    list);
1711
1712                         list_del(&packet->list);
1713
1714                         /* fully synchronous */
1715                         packet->completion = NULL;
1716
1717                         if (!list_empty(rx_pktq))
1718                                 /*
1719                                  * look_aheads in all packet
1720                                  * except the last one in the
1721                                  * bundle must be ignored
1722                                  */
1723                                 packet->info.rx.rx_flags |=
1724                                         HTC_RX_PKT_IGNORE_LOOKAHEAD;
1725
1726                         /* go fetch the packet */
1727                         status = dev_rx_pkt(target, packet, packet->act_len);
1728                         if (status)
1729                                 return status;
1730
1731                         list_add_tail(&packet->list, comp_pktq);
1732                 }
1733         }
1734
1735         return status;
1736 }
1737
1738 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1739                                      u32 msg_look_ahead[], int *num_pkts)
1740 {
1741         struct htc_packet *packets, *tmp_pkt;
1742         struct htc_endpoint *endpoint;
1743         struct list_head rx_pktq, comp_pktq;
1744         int status = 0;
1745         u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
1746         int num_look_ahead = 1;
1747         enum htc_endpoint_id id;
1748         int n_fetched = 0;
1749
1750         *num_pkts = 0;
1751
1752         /*
1753          * On first entry copy the look_aheads into our temp array for
1754          * processing
1755          */
1756         memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
1757
1758         while (true) {
1759
1760                 /*
1761                  * First lookahead sets the expected endpoint IDs for all
1762                  * packets in a bundle.
1763                  */
1764                 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
1765                 endpoint = &target->endpoint[id];
1766
1767                 if (id >= ENDPOINT_MAX) {
1768                         ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1769                                    id);
1770                         status = -ENOMEM;
1771                         break;
1772                 }
1773
1774                 INIT_LIST_HEAD(&rx_pktq);
1775                 INIT_LIST_HEAD(&comp_pktq);
1776
1777                 /*
1778                  * Try to allocate as many HTC RX packets indicated by the
1779                  * look_aheads.
1780                  */
1781                 status = alloc_and_prep_rxpkts(target, look_aheads,
1782                                                num_look_ahead, endpoint,
1783                                                &rx_pktq);
1784                 if (status)
1785                         break;
1786
1787                 if (get_queue_depth(&rx_pktq) >= 2)
1788                         /*
1789                          * A recv bundle was detected, force IRQ status
1790                          * re-check again
1791                          */
1792                         target->chk_irq_status_cnt = 1;
1793
1794                 n_fetched += get_queue_depth(&rx_pktq);
1795
1796                 num_look_ahead = 0;
1797
1798                 status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
1799
1800                 if (!status)
1801                         chk_rx_water_mark(endpoint);
1802
1803                 /* Process fetched packets */
1804                 status = htc_proc_fetched_rxpkts(target, &comp_pktq,
1805                                                  look_aheads, &num_look_ahead);
1806
1807                 if (!num_look_ahead || status)
1808                         break;
1809
1810                 /*
1811                  * For SYNCH processing, if we get here, we are running
1812                  * through the loop again due to a detected lookahead. Set
1813                  * flag that we should re-check IRQ status registers again
1814                  * before leaving IRQ processing, this can net better
1815                  * performance in high throughput situations.
1816                  */
1817                 target->chk_irq_status_cnt = 1;
1818         }
1819
1820         if (status) {
1821                 ath6kl_err("failed to get pending recv messages: %d\n",
1822                            status);
1823                 /*
1824                  * Cleanup any packets we allocated but didn't use to
1825                  * actually fetch any packets.
1826                  */
1827                 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1828                         list_del(&packets->list);
1829                         htc_reclaim_rxbuf(target, packets,
1830                                         &target->endpoint[packets->endpoint]);
1831                 }
1832
1833                 /* cleanup any packets in sync completion queue */
1834                 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
1835                         list_del(&packets->list);
1836                         htc_reclaim_rxbuf(target, packets,
1837                                           &target->endpoint[packets->endpoint]);
1838                 }
1839
1840                 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1841                         ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1842                         ath6kldev_rx_control(target->dev, false);
1843                 }
1844         }
1845
1846         /*
1847          * Before leaving, check to see if host ran out of buffers and
1848          * needs to stop the receiver.
1849          */
1850         if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1851                 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1852                 ath6kldev_rx_control(target->dev, false);
1853         }
1854         *num_pkts = n_fetched;
1855
1856         return status;
1857 }
1858
/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY.  At init messages
 * on ENDPOINT 0 are expected.
 *
 * Returns the received control packet on success, or NULL on timeout,
 * protocol violation (wrong endpoint, oversized payload), buffer
 * exhaustion or receive/header-processing failure.  The returned
 * packet must be reclaimed by the caller.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;

        /* poll the mailbox for a lookahead; blocks up to the timeout */
        if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
                               HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

        /* the lookahead is a copy of the HTC frame header */
        htc_hdr = (struct htc_frame_hdr *)&look_ahead;

        /* only the control endpoint is legal at init time */
        if (htc_hdr->eid != ENDPOINT_0)
                return NULL;

        packet = htc_get_control_buf(target, false);

        if (!packet)
                return NULL;

        packet->info.rx.rx_flags = 0;
        packet->info.rx.exp_hdr = look_ahead;
        /* payload length from the header plus the HTC header itself */
        packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

        /* reject messages larger than the control buffer */
        if (packet->act_len > packet->buf_len)
                goto fail_ctrl_rx;

        /* we want synchronous operation */
        packet->completion = NULL;

        /* get the message from the device, this will block */
        if (dev_rx_pkt(target, packet, packet->act_len))
                goto fail_ctrl_rx;

        /* process receive header */
        packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);

        if (packet->status) {
                ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
                           packet->status);
                goto fail_ctrl_rx;
        }

        return packet;

fail_ctrl_rx:
        /* reset and return the control buffer to the free pool */
        if (packet != NULL) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return NULL;
}
1920
1921 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
1922                                   struct list_head *pkt_queue)
1923 {
1924         struct htc_endpoint *endpoint;
1925         struct htc_packet *first_pkt;
1926         bool rx_unblock = false;
1927         int status = 0, depth;
1928
1929         if (list_empty(pkt_queue))
1930                 return -ENOMEM;
1931
1932         first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
1933
1934         if (first_pkt->endpoint >= ENDPOINT_MAX)
1935                 return status;
1936
1937         depth = get_queue_depth(pkt_queue);
1938
1939         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1940                 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
1941                 first_pkt->endpoint, depth, first_pkt->buf_len);
1942
1943         endpoint = &target->endpoint[first_pkt->endpoint];
1944
1945         if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1946                 struct htc_packet *packet, *tmp_pkt;
1947
1948                 /* walk through queue and mark each one canceled */
1949                 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1950                         packet->status = -ECANCELED;
1951                         list_del(&packet->list);
1952                         do_rx_completion(endpoint, packet);
1953                 }
1954
1955                 return status;
1956         }
1957
1958         spin_lock_bh(&target->rx_lock);
1959
1960         list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
1961
1962         /* check if we are blocked waiting for a new buffer */
1963         if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1964                 if (target->ep_waiting == first_pkt->endpoint) {
1965                         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1966                                 "receiver was blocked on ep:%d, unblocking.\n",
1967                                 target->ep_waiting);
1968                         target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
1969                         target->ep_waiting = ENDPOINT_MAX;
1970                         rx_unblock = true;
1971                 }
1972         }
1973
1974         spin_unlock_bh(&target->rx_lock);
1975
1976         if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
1977                 /* TODO : implement a buffer threshold count? */
1978                 ath6kldev_rx_control(target->dev, true);
1979
1980         return status;
1981 }
1982
1983 void ath6kl_htc_flush_rx_buf(struct htc_target *target)
1984 {
1985         struct htc_endpoint *endpoint;
1986         struct htc_packet *packet, *tmp_pkt;
1987         int i;
1988
1989         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1990                 endpoint = &target->endpoint[i];
1991                 if (!endpoint->svc_id)
1992                         /* not in use.. */
1993                         continue;
1994
1995                 spin_lock_bh(&target->rx_lock);
1996                 list_for_each_entry_safe(packet, tmp_pkt,
1997                                          &endpoint->rx_bufq, list) {
1998                         list_del(&packet->list);
1999                         spin_unlock_bh(&target->rx_lock);
2000                         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
2001                                    "flushing rx pkt:0x%p, len:%d, ep:%d\n",
2002                                    packet, packet->buf_len,
2003                                    packet->endpoint);
2004                         dev_kfree_skb(packet->pkt_cntxt);
2005                         spin_lock_bh(&target->rx_lock);
2006                 }
2007                 spin_unlock_bh(&target->rx_lock);
2008         }
2009 }
2010
/*
 * Connect an HTC service on the target and set up the corresponding
 * host-side endpoint.
 *
 * For HTC_CTRL_RSVD_SVC the control endpoint (ENDPOINT_0) is assigned
 * locally without any message exchange.  For all other services a
 * connect request is sent on the control endpoint and the target's
 * response supplies the assigned endpoint ID and max message size.
 *
 * On success the endpoint is marked in use, its credit-distribution
 * parameters are derived from the target credit size, and the
 * assigned endpoint/length limit are returned through @conn_resp.
 *
 * NOTE(review): -ENOMEM is used here for protocol/validation failures
 * as well as allocation failures — kept as-is since callers only test
 * for non-zero.
 */
int ath6kl_htc_conn_service(struct htc_target *target,
                            struct htc_service_connect_req *conn_req,
                            struct htc_service_connect_resp *conn_resp)
{
        struct htc_packet *rx_pkt = NULL;
        struct htc_packet *tx_pkt = NULL;
        struct htc_conn_service_resp *resp_msg;
        struct htc_conn_service_msg *conn_msg;
        struct htc_endpoint *endpoint;
        enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
        unsigned int max_msg_sz = 0;
        int status = 0;

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "htc_conn_service, target:0x%p service id:0x%X\n",
                   target, conn_req->svc_id);

        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
                /* special case for pseudo control service */
                assigned_ep = ENDPOINT_0;
                max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
        } else {
                /* allocate a packet to send to the target */
                tx_pkt = htc_get_control_buf(target, true);

                if (!tx_pkt)
                        return -ENOMEM;

                conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
                memset(conn_msg, 0, sizeof(*conn_msg));
                conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
                conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
                conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

                /* svc_meta_len is zero here (memset above, never set),
                 * so no service meta data is attached to the request */
                set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
                                 sizeof(*conn_msg) + conn_msg->svc_meta_len,
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

                /* we want synchronous operation */
                tx_pkt->completion = NULL;
                htc_prep_send_pkt(tx_pkt, 0, 0, 0);
                status = htc_issue_send(target, tx_pkt);

                if (status)
                        goto fail_tx;

                /* wait for response */
                rx_pkt = htc_wait_for_ctrl_msg(target);

                if (!rx_pkt) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

                /* sanity-check the response id and minimum length */
                if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
                    || (rx_pkt->act_len < sizeof(*resp_msg))) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                conn_resp->resp_code = resp_msg->status;
                /* check response status */
                if (resp_msg->status != HTC_SERVICE_SUCCESS) {
                        ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
                                   resp_msg->svc_id, resp_msg->status);
                        status = -ENOMEM;
                        goto fail_tx;
                }

                assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
                max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
        }

        /* reject an out-of-range endpoint or a zero message size */
        if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
                status = -ENOMEM;
                goto fail_tx;
        }

        endpoint = &target->endpoint[assigned_ep];
        endpoint->eid = assigned_ep;
        /* a non-zero svc_id means the endpoint is already taken */
        if (endpoint->svc_id) {
                status = -ENOMEM;
                goto fail_tx;
        }

        /* return assigned endpoint to caller */
        conn_resp->endpoint = assigned_ep;
        conn_resp->len_max = max_msg_sz;

        /* setup the endpoint */

        /* this marks the endpoint in use */
        endpoint->svc_id = conn_req->svc_id;

        endpoint->max_txq_depth = conn_req->max_txq_depth;
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
        endpoint->cred_dist.htc_rsvd = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

        if (conn_req->max_rxmsg_sz) {
                /*
                 * Override cred_per_msg calculation, this optimizes
                 * the credit-low indications since the host will actually
                 * issue smaller messages in the Send path.
                 */
                if (conn_req->max_rxmsg_sz > max_msg_sz) {
                        status = -ENOMEM;
                        goto fail_tx;
                }
                endpoint->cred_dist.cred_per_msg =
                    conn_req->max_rxmsg_sz / target->tgt_cred_sz;
        } else
                endpoint->cred_dist.cred_per_msg =
                    max_msg_sz / target->tgt_cred_sz;

        /* a message always costs at least one credit */
        if (!endpoint->cred_dist.cred_per_msg)
                endpoint->cred_dist.cred_per_msg = 1;

        /* save local connection flags */
        endpoint->conn_flags = conn_req->flags;

fail_tx:
        /* control buffers are returned to their pools on every path */
        if (tx_pkt)
                htc_reclaim_txctrl_buf(target, tx_pkt);

        if (rx_pkt) {
                htc_rxpkt_reset(rx_pkt);
                reclaim_rx_ctrl_buf(target, rx_pkt);
        }

        return status;
}
2148
2149 static void reset_ep_state(struct htc_target *target)
2150 {
2151         struct htc_endpoint *endpoint;
2152         int i;
2153
2154         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2155                 endpoint = &target->endpoint[i];
2156                 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2157                 endpoint->svc_id = 0;
2158                 endpoint->len_max = 0;
2159                 endpoint->max_txq_depth = 0;
2160                 memset(&endpoint->ep_st, 0,
2161                        sizeof(endpoint->ep_st));
2162                 INIT_LIST_HEAD(&endpoint->rx_bufq);
2163                 INIT_LIST_HEAD(&endpoint->txq);
2164                 endpoint->target = target;
2165         }
2166
2167         /* reset distribution list */
2168         INIT_LIST_HEAD(&target->cred_dist_list);
2169 }
2170
2171 int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
2172                              enum htc_endpoint_id endpoint)
2173 {
2174         int num;
2175
2176         spin_lock_bh(&target->rx_lock);
2177         num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2178         spin_unlock_bh(&target->rx_lock);
2179         return num;
2180 }
2181
/*
 * Configure message bundling limits based on what the host, HIF
 * scatter support and the target can each handle.
 *
 * If scatter support cannot be enabled, msg_per_bndl_max is zeroed
 * and bundling stays disabled.  Otherwise the per-bundle message
 * count and RX/TX bundle byte sizes are derived from the scatter
 * capabilities, and TX bundling is disabled again if the target
 * credit size is not block aligned.
 */
static void htc_setup_msg_bndl(struct htc_target *target)
{
        /* limit what HTC can handle */
        target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
                                       target->msg_per_bndl_max);

        /* no scatter support: leave bundling fully disabled */
        if (ath6kl_hif_enable_scatter(target->dev->ar)) {
                target->msg_per_bndl_max = 0;
                return;
        }

        /* limit bundle what the device layer can handle */
        target->msg_per_bndl_max = min(target->max_scat_entries,
                                       target->msg_per_bndl_max);

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "htc bundling allowed. max msg per htc bundle: %d\n",
                   target->msg_per_bndl_max);

        /* Max rx bundle size is limited by the max tx bundle size */
        target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
        /* Max tx bundle size if limited by the extended mbox address range */
        target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
                                     target->max_xfer_szper_scatreq);

        ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
                   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

        if (target->max_tx_bndl_sz)
                target->tx_bndl_enable = true;

        if (target->max_rx_bndl_sz)
                target->rx_bndl_enable = true;

        if ((target->tgt_cred_sz % target->block_sz) != 0) {
                ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
                            target->tgt_cred_sz);

                /*
                 * Disallow send bundling since the credit size is
                 * not aligned to a block size the I/O block
                 * padding will spill into the next credit buffer
                 * which is fatal.
                 */
                target->tx_bndl_enable = false;
        }
}
2229
/*
 * Wait for the target's HTC "ready" report and complete host-side
 * setup: record credit parameters, detect the HTC protocol version,
 * configure message bundling (extended ready message only) and
 * connect the pseudo control service on ENDPOINT_0.
 *
 * Returns 0 on success or a negative error code.  If connecting the
 * control service fails, any scatter resources set up for bundling
 * are released.
 */
int ath6kl_htc_wait_target(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_ready_ext_msg *rdy_msg;
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp resp;
        int status;

        /* we should be getting 1 control message that the target is ready */
        packet = htc_wait_for_ctrl_msg(target);

        if (!packet)
                return -ENOMEM;

        /* we controlled the buffer creation so it's properly aligned */
        rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

        /* must at least be a valid legacy (2.0) ready message */
        if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
            (packet->act_len < sizeof(struct htc_ready_msg))) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        /* a target reporting no credits or zero credit size is unusable */
        if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "target ready: credits: %d credit size: %d\n",
                   target->tgt_creds, target->tgt_cred_sz);

        /* check if this is an extended ready message */
        if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
                /* this is an extended message */
                target->htc_tgt_ver = rdy_msg->htc_ver;
                target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
        } else {
                /* legacy */
                target->htc_tgt_ver = HTC_VERSION_2P0;
                target->msg_per_bndl_max = 0;
        }

        ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
                  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                  target->htc_tgt_ver);

        /* bundling is only negotiated when the target advertises it */
        if (target->msg_per_bndl_max > 0)
                htc_setup_msg_bndl(target);

        /* setup our pseudo HTC control endpoint connection */
        memset(&connect, 0, sizeof(connect));
        memset(&resp, 0, sizeof(resp));
        connect.ep_cb.rx = htc_ctrl_rx;
        connect.ep_cb.rx_refill = NULL;
        connect.ep_cb.tx_full = NULL;
        connect.max_txq_depth = NUM_CONTROL_BUFFERS;
        connect.svc_id = HTC_CTRL_RSVD_SVC;

        /* connect fake service */
        status = ath6kl_htc_conn_service((void *)target, &connect, &resp);

        /* undo any scatter setup done for bundling above */
        if (status)
                ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
        if (packet) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return status;
}
2306
2307 /*
2308  * Start HTC, enable interrupts and let the target know
2309  * host has finished setup.
2310  */
int ath6kl_htc_start(struct htc_target *target)
{
        struct htc_packet *packet;
        int status;

        /* Disable interrupts at the chip level */
        ath6kldev_disable_intrs(target->dev);

        /* clear any stale state from a previous run */
        target->htc_flags = 0;
        target->rx_st_flags = 0;

        /* Push control receive buffers into htc control endpoint */
        while ((packet = htc_get_control_buf(target, false)) != NULL) {
                status = htc_add_rxbuf(target, packet);
                /*
                 * NOTE(review): on failure the just-obtained control
                 * buffer is not explicitly reclaimed here — presumably
                 * htc_add_rxbuf keeps ownership on error; confirm.
                 */
                if (status)
                        return status;
        }

        /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
        ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
                          target->tgt_creds);

        dump_cred_dist_stats(target);

        /* Indicate to the target of the setup completion */
        status = htc_setup_tx_complete(target);

        if (status)
                return status;

        /* unmask interrupts */
        status = ath6kldev_unmask_intrs(target->dev);

        /* roll back to a stopped state if interrupts cannot be enabled */
        if (status)
                ath6kl_htc_stop(target);

        return status;
}
2349
2350 /* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
        /* mark stopping under htc_lock so concurrent paths see it */
        spin_lock_bh(&target->htc_lock);
        target->htc_flags |= HTC_OP_STATE_STOPPING;
        spin_unlock_bh(&target->htc_lock);

        /*
         * Masking interrupts is a synchronous operation, when this
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
        ath6kldev_mask_intrs(target->dev);

        /* flush pending TX on every endpoint */
        ath6kl_htc_flush_txep_all(target);

        /* free all queued RX buffers */
        ath6kl_htc_flush_rx_buf(target);

        /* return endpoints to their unconnected state */
        reset_ep_state(target);
}
2370
2371 void *ath6kl_htc_create(struct ath6kl *ar)
2372 {
2373         struct htc_target *target = NULL;
2374         struct htc_packet *packet;
2375         int status = 0, i = 0;
2376         u32 block_size, ctrl_bufsz;
2377
2378         target = kzalloc(sizeof(*target), GFP_KERNEL);
2379         if (!target) {
2380                 ath6kl_err("unable to allocate memory\n");
2381                 return NULL;
2382         }
2383
2384         target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2385         if (!target->dev) {
2386                 ath6kl_err("unable to allocate memory\n");
2387                 status = -ENOMEM;
2388                 goto fail_create_htc;
2389         }
2390
2391         spin_lock_init(&target->htc_lock);
2392         spin_lock_init(&target->rx_lock);
2393         spin_lock_init(&target->tx_lock);
2394
2395         INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2396         INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2397         INIT_LIST_HEAD(&target->cred_dist_list);
2398
2399         target->dev->ar = ar;
2400         target->dev->htc_cnxt = target;
2401         target->ep_waiting = ENDPOINT_MAX;
2402
2403         reset_ep_state(target);
2404
2405         status = ath6kldev_setup(target->dev);
2406
2407         if (status)
2408                 goto fail_create_htc;
2409
2410         block_size = ar->mbox_info.block_size;
2411
2412         ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2413                       (block_size + HTC_HDR_LENGTH) :
2414                       (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2415
2416         for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2417                 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2418                 if (!packet)
2419                         break;
2420
2421                 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2422                 if (!packet->buf_start) {
2423                         kfree(packet);
2424                         break;
2425                 }
2426
2427                 packet->buf_len = ctrl_bufsz;
2428                 if (i < NUM_CONTROL_RX_BUFFERS) {
2429                         packet->act_len = 0;
2430                         packet->buf = packet->buf_start;
2431                         packet->endpoint = ENDPOINT_0;
2432                         list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2433                 } else
2434                         list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2435         }
2436
2437 fail_create_htc:
2438         if (i != NUM_CONTROL_BUFFERS || status) {
2439                 if (target) {
2440                         ath6kl_htc_cleanup(target);
2441                         target = NULL;
2442                 }
2443         }
2444
2445         return target;
2446 }
2447
2448 /* cleanup the HTC instance */
2449 void ath6kl_htc_cleanup(struct htc_target *target)
2450 {
2451         struct htc_packet *packet, *tmp_packet;
2452
2453         ath6kl_hif_cleanup_scatter(target->dev->ar);
2454
2455         list_for_each_entry_safe(packet, tmp_packet,
2456                         &target->free_ctrl_txbuf, list) {
2457                 list_del(&packet->list);
2458                 kfree(packet->buf_start);
2459                 kfree(packet);
2460         }
2461
2462         list_for_each_entry_safe(packet, tmp_packet,
2463                         &target->free_ctrl_rxbuf, list) {
2464                 list_del(&packet->list);
2465                 kfree(packet->buf_start);
2466                 kfree(packet);
2467         }
2468
2469         kfree(target->dev);
2470         kfree(target);
2471 }