firefly-linux-kernel-4.4.55.git: drivers/net/wireless/rockchip_wlan/esp8089/esp_driver/esp_sip.c
1 /*
2  * Copyright (c) 2009 - 2013 Espressif System.
3  */
4
5 #include <linux/ieee80211.h>
6 #include <net/mac80211.h>
7 #include <net/cfg80211.h>
8 #include <linux/skbuff.h>
9 #include <linux/bitops.h>
10 #include <linux/version.h>
11 #include <linux/mmc/card.h>
12 #include <linux/mmc/mmc.h>
13 #include <linux/mmc/host.h>
14 #include <linux/mmc/sdio_func.h>
15 #include <linux/mmc/sdio_ids.h>
16 #include <linux/mmc/sdio.h>
17 #include <linux/mmc/sd.h>
18 #include <linux/completion.h> 
19
20 #include "esp_pub.h"
21 #include "esp_sip.h"
22 #include "esp_ctrl.h"
23 #include "esp_sif.h"
24 #include "esp_debug.h"
25 #include "slc_host_register.h"
26 #include "esp_wmac.h"
27 #include "esp_utils.h"
28 #ifdef TEST_MODE
29 #include "testmode.h"
30 #endif
31
32 #ifdef USE_EXT_GPIO
33 #include "esp_ext.h"
34 #endif /* USE_EXT_GPIO */
35
36 extern struct completion *gl_bootup_cplx; 
37 static u32 bcn_counter = 0;
38 static u32 probe_rsp_counter = 0;
39
40 static int old_signal = -35;
41 static int avg_signal = 0;
42 static int signal_loop = 0;
43
44 #define SIGNAL_COUNT  300
45
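/*
 * 802.1D TID to WMM access category mapping implemented by TID_TO_AC():
 * TID 0,3 -> WME_AC_BE; TID 1,2 -> WME_AC_BK; TID 4,5 -> WME_AC_VI; TID 6,7 -> WME_AC_VO
 */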
46 #define TID_TO_AC(_tid) (((_tid) == 0 || (_tid) == 3) ? WME_AC_BE : ((_tid) < 3) ? WME_AC_BK : ((_tid) < 6) ? WME_AC_VI : WME_AC_VO)
47
48 #ifdef SIP_DEBUG
49 #define esp_sip_dbg esp_dbg
50 struct sip_trace {
51         u32 tx_data;
52         u32 tx_cmd;
53         u32 rx_data;
54         u32 rx_evt;
55         u32 rx_tx_status;
56         u32 tx_out_of_credit;
57         u32 tx_one_shot_overflow;
58 };
59 static struct sip_trace str;
60 #define STRACE_TX_DATA_INC() (str.tx_data++)
61 #define STRACE_TX_CMD_INC()  (str.tx_cmd++)
62 #define STRACE_RX_DATA_INC() (str.rx_data++)
63 #define STRACE_RX_EVENT_INC() (str.rx_evt++)
64 #define STRACE_RX_TXSTATUS_INC() (str.rx_tx_status++)
65 #define STRACE_TX_OUT_OF_CREDIT_INC() (str.tx_out_of_credit++)
66 #define STRACE_TX_ONE_SHOT_INC() (str.tx_one_shot_overflow++)
67
68 #if 0
69 static void sip_show_trace(struct esp_sip *sip);
70 #endif //0000
71
72 #define STRACE_SHOW(sip)  sip_show_trace(sip)
73 #else
74 #define esp_sip_dbg(...)
75 #define STRACE_TX_DATA_INC()
76 #define STRACE_TX_CMD_INC()
77 #define STRACE_RX_DATA_INC()
78 #define STRACE_RX_EVENT_INC()
79 #define STRACE_RX_TXSTATUS_INC()
80 #define STRACE_TX_OUT_OF_CREDIT_INC()
81 #define STRACE_TX_ONE_SHOT_INC()
82 #define STRACE_SHOW(sip)
83 #endif /* SIP_DEBUG */
84
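/*
 * Hysteresis thresholds (in queued data packets) for stopping/resuming the
 * mac80211 tx queues; presumably consulted by sip_queue_may_resume() and the
 * corresponding stop check declared elsewhere (e.g. in esp_sip.h).
 */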
85 #define SIP_STOP_QUEUE_THRESHOLD 48
86 #define SIP_RESUME_QUEUE_THRESHOLD  12
87 #ifndef FAST_TX_STATUS
88 #define SIP_PENDING_STOP_TX_THRESHOLD 6
89 #define SIP_PENDING_RESUME_TX_THRESHOLD 6
90 #endif /* !FAST_TX_STATUS */
91
92 #define SIP_MIN_DATA_PKT_LEN    (sizeof(struct esp_mac_rx_ctrl) + 24) /* 24 is the minimum 802.11 header length */
93 #define TARGET_RX_SIZE 524
94
95 #ifdef ESP_PREALLOC
96 extern struct sk_buff *esp_get_sip_skb(int size);
97 extern void esp_put_sip_skb(struct sk_buff **skb);
98
99 extern u8 *esp_get_tx_aggr_buf(void);
100 extern void esp_put_tx_aggr_buf(u8 **p);
101
102 #endif
103
104 static struct sip_pkt *sip_get_ctrl_buf(struct esp_sip *sip, SIP_BUF_TYPE bftype);
105
106 static void sip_reclaim_ctrl_buf(struct esp_sip *sip, struct sip_pkt *pkt, SIP_BUF_TYPE bftype);
107
108 static void sip_free_init_ctrl_buf(struct esp_sip *sip);
109
110 static void sip_dec_credit(struct esp_sip *sip);
111
112
113 static int sip_pack_pkt(struct esp_sip *sip, struct sk_buff *skb, int *pm_state);
114
115 static struct esp_mac_rx_ctrl *sip_parse_normal_mac_ctrl(struct sk_buff *skb, int * pkt_len_enc, int *buf_len, int *pulled_len);
116
117 static struct sk_buff * sip_parse_data_rx_info(struct esp_sip *sip, struct sk_buff *skb, int pkt_len_enc, int buf_len, struct esp_mac_rx_ctrl *mac_ctrl, int *pulled_len);
118
119 #ifndef RX_SYNC
120 static inline void sip_rx_pkt_enqueue(struct esp_sip *sip, struct sk_buff *skb);
121
122 static inline struct sk_buff * sip_rx_pkt_dequeue(struct esp_sip *sip);
123 #endif /* RX_SYNC */
124 #ifndef FAST_TX_STATUS
125 static void sip_after_tx_status_update(struct esp_sip *sip);
126 #endif /* !FAST_TX_STATUS */
127
128 static void sip_after_write_pkts(struct esp_sip *sip);
129
130 //static void show_data_seq(u8 *pkt);
131
132 static void sip_update_tx_credits(struct esp_sip *sip, u16 recycled_credits);
133
134 //static void sip_trigger_txq_process(struct esp_sip *sip);
135
136 #ifndef RX_SYNC
137 static bool sip_rx_pkt_process(struct esp_sip * sip, struct sk_buff *skb);
138 #else
139 static void sip_rx_pkt_process_sync(struct esp_sip *sip, struct sk_buff *skb);
140 #endif /* RX_SYNC */
141
142 #ifdef FAST_TX_STATUS
143 static void sip_tx_status_report(struct esp_sip *sip, struct sk_buff *skb, struct ieee80211_tx_info* tx_info, bool success);
144 #endif /* FAST_TX_STATUS */
145
146 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
147 static void sip_check_skb_alignment(struct sk_buff *skb);
148 #endif /* NEW_KERNEL */
149
150 #ifdef ESP_RX_COPYBACK_TEST
151 /* only for rx test */
152 static u8 *copyback_buf;
153 static u32 copyback_offset = 0;
154 #endif /* ESP_RX_COPYBACK_TEST */
155
156 #ifdef FPGA_TXDATA
157 int sip_send_tx_data(struct esp_sip *sip);
158 #endif/* FPGA_TXDATA */
159
160 #ifdef FPGA_LOOPBACK
161 int sip_send_loopback_cmd_mblk(struct esp_sip *sip);
162 #endif /* FPGA_LOOPBACK */
163
164 #ifdef SIP_DEBUG
165 #if 0
166 static void sip_show_trace(struct esp_sip *sip)
167 {
168         esp_sip_dbg(ESP_DBG_TRACE, "\n \t tx_data %u \t tx_cmd %u \t rx_data %u \t rx_evt %u \t rx_tx_status %u \n\n", \
169                     str.tx_data, str.tx_cmd, str.rx_data, str.rx_evt, str.rx_tx_status);
170 }
171 #endif //0000
172 #endif //SIP_DEBUG
173
174 #if 0
175 static void show_data_seq(u8 *pkt)
176 {
177         struct ieee80211_hdr * wh = (struct ieee80211_hdr *)pkt;
178         u16 seq = 0;
179
180         if (ieee80211_is_data(wh->frame_control)) {
181                 seq = (le16_to_cpu(wh->seq_ctrl) >> 4);
182                 esp_sip_dbg(ESP_DBG_TRACE, " ieee80211 seq %u addr1 %pM\n", seq, wh->addr1);
183         } else if (ieee80211_is_beacon(wh->frame_control) || ieee80211_is_probe_resp(wh->frame_control))
184                 esp_sip_dbg(ESP_DBG_TRACE, " ieee80211 probe resp or beacon 0x%04x\n", wh->frame_control);
185         else
186                 esp_sip_dbg(ESP_DBG_TRACE, " ieee80211 other mgmt pkt 0x%04x\n", wh->frame_control);
187 }
188 #endif //0000
189
190 //#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
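/*
 * check_ac_tid(): sanity-check the (ac, tid) pair chosen for a frame against
 * its 802.11 header.  A true return tells the caller (sip_pack_pkt()) to force
 * the frame onto WME_AC_BE with TID 0; non-QoS data frames that are not
 * already TID 7/AC_VO are remapped this way.
 */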
191 static bool check_ac_tid(u8 *pkt, u8 ac, u8 tid)
192 {
193         struct ieee80211_hdr * wh = (struct ieee80211_hdr *)pkt;
194 #ifdef TID_DEBUG
195         u16 real_tid = 0;
196 #endif //TID_DEBUG
197
198         if (ieee80211_is_data_qos(wh->frame_control)) {
199 #ifdef TID_DEBUG
200                 real_tid = *ieee80211_get_qos_ctl(wh) & IEEE80211_QOS_CTL_TID_MASK;
201
202                 esp_sip_dbg(ESP_SHOW, "ac:%u, tid:%u, tid in pkt:%u\n", ac, tid, real_tid);
203                 if (tid != real_tid) {
204                         esp_sip_dbg(ESP_DBG_ERROR, "111 ac:%u, tid:%u, tid in pkt:%u\n", ac, tid, real_tid);
205                 }
206                 if (TID_TO_AC(tid) != ac) {
207                         esp_sip_dbg(ESP_DBG_ERROR, "222 ac:%u, tid:%u, tid in pkt:%u\n", ac, tid, real_tid);
208                 }
209
210 #endif /* TID_DEBUG*/
211         } else if (ieee80211_is_mgmt(wh->frame_control)) {
212 #ifdef TID_DEBUG
213                 esp_sip_dbg(ESP_SHOW, "ac:%u, tid:%u\n", ac, tid);
214                 if (tid != 7 || ac != WME_AC_VO) {
215                         esp_sip_dbg(ESP_DBG_ERROR, "333 ac:%u, tid:%u\n", ac, tid);
216                 }
217 #endif /* TID_DEBUG*/
218         } else {
219                 if (ieee80211_is_ctl(wh->frame_control)) {
220 #ifdef TID_DEBUG
221                         esp_sip_dbg(ESP_SHOW, "%s is ctrl pkt fc 0x%04x ac:%u, tid:%u, tid in pkt:%u\n", __func__, wh->frame_control, ac, tid, real_tid);
222 #endif /* TID_DEBUG*/
223                 } else {
224                         if (tid != 0 || ac != WME_AC_BE) {
225                                 //show_buf(pkt, 24);
226                                 esp_sip_dbg(ESP_DBG_LOG, "444 ac:%u, tid:%u \n", ac, tid);
227                                 if (tid == 7 && ac == WME_AC_VO)
228                                         return false;
229                         }
230                         return true; //hack to modify non-qos null data.
231
232                 }
233         }
234
235         return false;
236 }
237 //#endif /* NEW_KERNEL || KERNEL_35 */
238
239 static void sip_update_tx_credits(struct esp_sip *sip, u16 recycled_credits)
240 {
241         esp_sip_dbg(ESP_DBG_TRACE, "%s:before add, credits is %d\n", __func__, atomic_read(&sip->tx_credits));
242         atomic_add(recycled_credits, &sip->tx_credits);
243         esp_sip_dbg(ESP_DBG_TRACE, "%s:after add %d, credits is %d\n", __func__, recycled_credits, atomic_read(&sip->tx_credits));
244 }
245
246 void sip_trigger_txq_process(struct esp_sip *sip)
247 {
248         if (atomic_read(&sip->tx_credits) <= sip->credit_to_reserve)  //no credits, do nothing
249                 return;
250
251         if (sip_queue_may_resume(sip)) {
252                 /* wakeup upper queue only if we have sufficient credits */
253                 esp_sip_dbg(ESP_DBG_TRACE, "%s wakeup ieee80211 txq \n", __func__);
254                 atomic_set(&sip->epub->txq_stopped, false);
255                 ieee80211_wake_queues(sip->epub->hw);
256         } else if (atomic_read(&sip->epub->txq_stopped) ) {
257                 esp_sip_dbg(ESP_DBG_TRACE, "%s can't wake txq, credits: %d \n", __func__, atomic_read(&sip->tx_credits) );
258         }
259
260         if (!skb_queue_empty(&sip->epub->txq)) {
261                 /* try to send out pkt already in sip queue once we have credits */
262                 esp_sip_dbg(ESP_DBG_TRACE, "%s resume sip txq \n", __func__);
263
264 #if !defined(FPGA_TXDATA) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
265                 if(sif_get_ate_config() == 0){
266                         ieee80211_queue_work(sip->epub->hw, &sip->epub->tx_work);
267                 } else {
268                         queue_work(sip->epub->esp_wkq, &sip->epub->tx_work);
269                 } 
270 #else
271                 queue_work(sip->epub->esp_wkq, &sip->epub->tx_work);
272 #endif
273         }
274 }
275
276 static bool sip_ampdu_occupy_buf(struct esp_sip *sip, struct esp_rx_ampdu_len * ampdu_len)
277 {
278         return (ampdu_len->substate == 0 || esp_wmac_rxsec_error(ampdu_len->substate) || (sip->dump_rpbm_err && ampdu_len->substate == RX_RPBM_ERR));
279 }
280
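/*
 * Layout of one received SIP aggregation buffer, as parsed below: a sequence
 * of 4-byte aligned packets, each starting with a struct sip_hdr.  The low
 * 12 bits of the first header's h_credits carry recycled tx credits, while
 * its upper 20 bits (config w3 [31:12]) carry the true length of the first
 * packet; hdr->len of the first header holds the total buffer length.
 */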
281 #ifdef RX_SYNC
282 static void sip_rx_pkt_process_sync(struct esp_sip *sip, struct sk_buff *skb)
283 #else
284 static bool sip_rx_pkt_process(struct esp_sip * sip, struct sk_buff *skb)
285 #endif /* RX_SYNC */
286 {
287 #define DO_NOT_COPY false
288 #define DO_COPY true
289
290         struct sip_hdr * hdr = NULL;
291         struct sk_buff * rskb = NULL;
292         int remains_len = 0;
293         int first_pkt_len = 0;
294         u8 *bufptr = NULL;
295         int ret = 0;
296         bool trigger_rxq = false;
297 #ifdef RX_SYNC
298         bool trigger_txq = false;
299 #endif/* RX_SYNC */
300
301         if (skb == NULL) {
302                 esp_sip_dbg(ESP_DBG_ERROR, "%s NULL SKB!!!!!!!! \n", __func__);
303 #ifdef RX_SYNC
304                 return;
305 #else
306                 return trigger_rxq;
307 #endif /* RX_SYNC */
308         }
309
310         hdr = (struct sip_hdr *)skb->data;
311         bufptr = skb->data;
312
313
314         esp_sip_dbg(ESP_DBG_TRACE, "%s Hcredits 0x%08x, realCredits %d\n", __func__, hdr->h_credits, hdr->h_credits & SIP_CREDITS_MASK);
315         if (hdr->h_credits & SIP_CREDITS_MASK) {
316                 sip_update_tx_credits(sip, hdr->h_credits & SIP_CREDITS_MASK);
317 #ifdef RX_SYNC
318                 trigger_txq = true;
319 #endif/* RX_SYNC */
320         }
321
322         hdr->h_credits &= ~SIP_CREDITS_MASK; /* clean credits in sip_hdr, prevent over-add */
323
324         esp_sip_dbg(ESP_DBG_TRACE, "%s credits %d\n", __func__, hdr->h_credits);
325
326         /*
327          * the first pkt's length is stored in the upper 20 bits of
328          * recycled_credits (config w3 [31:12]);
329          * repair hdr->len of the first pkt accordingly
330          */
331         remains_len = hdr->len;
332         first_pkt_len = hdr->h_credits >> 12;
333         hdr->len = first_pkt_len;
334
335         esp_dbg(ESP_DBG_TRACE, "%s first_pkt_len %d, whole pkt len %d \n", __func__, first_pkt_len, remains_len);
336         if (first_pkt_len > remains_len) {
337                 esp_dbg(ESP_DBG_ERROR, "first_pkt_len %d, whole pkt len %d\n", first_pkt_len, remains_len);
338                 show_buf((u8 *)hdr, first_pkt_len);
339                 ESSERT(0);
340                 goto _exit;
341         }
342
343         /*
344          * pkts handling, including the first pkt, should alloc new skb for each data pkt.
345          * free the original whole skb after parsing is done.
346          */
347         while (remains_len) {
348                 if (remains_len < sizeof(struct sip_hdr)) {
349                         ESSERT(0);
350                         show_buf((u8 *)hdr, 512);
351                         goto _exit;
352                 }
353                 
354                 hdr = (struct sip_hdr *)bufptr;
355                 if (hdr->len <= 0) {
356                         show_buf((u8 *)hdr, 512);
357                         ESSERT(0);
358                         goto _exit;
359                 }
360
361                 if((hdr->len & 3) != 0) {
362                         show_buf((u8 *)hdr, 512);
363                         ESSERT(0);
364                         goto _exit;
365                 }
366                 if (unlikely(hdr->seq != sip->rxseq++)) {
367                         esp_dbg(ESP_DBG_ERROR, "%s seq mismatch! got %u, expect %u\n", __func__, hdr->seq, sip->rxseq-1);
368                         sip->rxseq = hdr->seq + 1;
369                         show_buf(bufptr, 32);
370                         ESSERT(0);
371                 }
372
373                 if (SIP_HDR_IS_CTRL(hdr)) {
374                         STRACE_RX_EVENT_INC();
375                         esp_sip_dbg(ESP_DBG_TRACE, "seq %u \n", hdr->seq);
376
377                         ret = sip_parse_events(sip, bufptr);
378
379                         skb_pull(skb, hdr->len);
380
381                 } else if (SIP_HDR_IS_DATA(hdr)) {
382                         struct esp_mac_rx_ctrl * mac_ctrl = NULL;
383                         int pkt_len_enc = 0, buf_len = 0, pulled_len = 0;
384
385                         STRACE_RX_DATA_INC();
386                         esp_sip_dbg(ESP_DBG_TRACE, "seq %u \n", hdr->seq);
387                         mac_ctrl = sip_parse_normal_mac_ctrl(skb, &pkt_len_enc, &buf_len, &pulled_len);
388                         rskb = sip_parse_data_rx_info(sip, skb, pkt_len_enc, buf_len, mac_ctrl, &pulled_len);
389
390                         if(rskb == NULL)
391                                 goto _move_on;
392
393 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
394                         sip_check_skb_alignment(rskb);
395 #endif /* !NEW_KERNEL */
396                         if (likely(atomic_read(&sip->epub->wl.off) == 0)) {
397 #ifndef RX_SENDUP_SYNC
398                                 skb_queue_tail(&sip->epub->rxq, rskb);
399                                 trigger_rxq = true;
400 #else
401 #ifdef RX_CHECKSUM_TEST
402                                 esp_rx_checksum_test(rskb);
403 #endif
404                                 local_bh_disable();
405 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
406                                 ieee80211_rx(sip->epub->hw, rskb);
407 #else
408                                 /* simulate IEEE80211_SKB_RXCB in 2.6.32 */
409                                 ieee80211_rx(sip->epub->hw, rskb, (struct ieee80211_rx_status *)rskb->cb);
410 #endif
411                                 local_bh_enable();
412 #endif /* RX_SENDUP_SYNC */
413                         } else {
414                                 /* still need to go through parsing since skb_pull must be invoked */
415                                 kfree_skb(rskb);
416                         }
417                 } else if (SIP_HDR_IS_AMPDU(hdr)) {
418                         struct esp_mac_rx_ctrl * mac_ctrl = NULL;
419                         struct esp_mac_rx_ctrl new_mac_ctrl;
420                         struct esp_rx_ampdu_len *ampdu_len;
421                         int pkt_num;
422                         int pulled_len = 0;
423                         static int pkt_dropped = 0;
424                         static int pkt_total = 0;
425                         bool have_rxabort = false;
426                         bool have_goodpkt = false;
427                         static u8 frame_head[16];
428                         static u8 frame_buf_ttl = 0;
429
430                         ampdu_len = (struct esp_rx_ampdu_len *)(skb->data + hdr->len/sip->rx_blksz * sip->rx_blksz);
431                         esp_sip_dbg(ESP_DBG_TRACE, "%s rx ampdu total len %u\n", __func__, hdr->len);
432                         if(skb->data != (u8 *)hdr) {
433                                 printk("%p %p\n", skb->data, hdr);
434                                 show_buf(skb->data, 512);
435                                 show_buf((u8 *)hdr, 512);
436                                 ESSERT(0);
437                                 goto _exit;
438                         }
439                         mac_ctrl = sip_parse_normal_mac_ctrl(skb, NULL, NULL, &pulled_len);
440                         memcpy(&new_mac_ctrl, mac_ctrl, sizeof(struct esp_mac_rx_ctrl));
441                         mac_ctrl = &new_mac_ctrl;
442                         pkt_num = mac_ctrl->ampdu_cnt;
443                         esp_sip_dbg(ESP_DBG_TRACE, "%s %d rx ampdu %u pkts, %d pkts dumped, first len %u\n",
444                                         __func__, __LINE__, (unsigned int)((hdr->len % sip->rx_blksz) / sizeof(struct esp_rx_ampdu_len)), pkt_num, (unsigned int)ampdu_len->sublen);
445
446                         pkt_total += mac_ctrl->ampdu_cnt;
447                         //esp_sip_dbg(ESP_DBG_ERROR, "%s ampdu dropped %d/%d\n", __func__, pkt_dropped, pkt_total);
448                         while (pkt_num > 0) {
449                                 esp_sip_dbg(ESP_DBG_TRACE, "%s %d ampdu sub state %02x,\n", __func__, __LINE__, ampdu_len->substate);
450
451                                 if (sip_ampdu_occupy_buf(sip, ampdu_len)) { //pkt is dumped
452
453                                         rskb = sip_parse_data_rx_info(sip, skb, ampdu_len->sublen - FCS_LEN, 0, mac_ctrl, &pulled_len);
454                                         if (!rskb) {
455                                                 ESSERT(0);
456                                                 goto _exit;
457                                         }
458
459                                         if (likely(atomic_read(&sip->epub->wl.off) == 0) &&
460                                                         (ampdu_len->substate == 0 || ampdu_len->substate == RX_TKIPMIC_ERR ||
461                                                          (sip->sendup_rpbm_pkt && ampdu_len->substate == RX_RPBM_ERR)) &&
462                                                         (sip->rxabort_fixed || !have_rxabort) ) 
463                                         {
464                                                 if(!have_goodpkt) {
465                                                         have_goodpkt = true;
466                                                         memcpy(frame_head, rskb->data, 16);
467                                                         frame_head[1] &= ~0x80;
468                                                         frame_buf_ttl = 3;
469                                                 }
470 #ifndef RX_SENDUP_SYNC
471                                                 skb_queue_tail(&sip->epub->rxq, rskb);
472                                                 trigger_rxq = true;
473 #else
474 #ifdef RX_CHECKSUM_TEST
475                                                 esp_rx_checksum_test(rskb);
476 #endif
477                                                 local_bh_disable();
478 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
479                                                 ieee80211_rx(sip->epub->hw, rskb);
480 #else
481                                                 /* simulate IEEE80211_SKB_RXCB in 2.6.32 */
482                                                 ieee80211_rx(sip->epub->hw, rskb, (struct ieee80211_rx_status *)rskb->cb);
483 #endif
484                                                 local_bh_enable();
485 #endif /* RX_SENDUP_SYNC */
486
487                                         } else {
488                                                 kfree_skb(rskb);
489                                         }
490                                 } else {
491                                         if (ampdu_len->substate == RX_ABORT) {
492                                                 u8 * a;
493                                                 have_rxabort = true;
494                                                 esp_sip_dbg(ESP_DBG_TRACE, "rx abort %d %d\n", frame_buf_ttl, pkt_num);
495                                                 if(frame_buf_ttl && !sip->rxabort_fixed) {
496                                                         struct esp_rx_ampdu_len * next_good_ampdu_len = ampdu_len + 1;
497                                                         a = frame_head;
498                                                         esp_sip_dbg(ESP_DBG_TRACE, "frame:%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
499                                                                         a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]);
500                                                         while(!sip_ampdu_occupy_buf(sip, next_good_ampdu_len)) {
501                                                                 if(next_good_ampdu_len > ampdu_len + pkt_num - 1)
502                                                                         break;
503                                                                 next_good_ampdu_len++;
504
505                                                         }
506                                                         if(next_good_ampdu_len <= ampdu_len + pkt_num -1) {
507                                                                 bool b0, b10, b11;
508                                                                 a = skb->data;
509                                                                 esp_sip_dbg(ESP_DBG_TRACE, "buf:%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
510                                                                                 a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]);
511                                                                 b0 = memcmp(frame_head + 4, skb->data + 4, 12) == 0;
512                                                                 b10 = memcmp(frame_head + 10, skb->data, 6) == 0;
513                                                                 b11 = memcmp(frame_head + 11, skb->data, 5) == 0;
514                                                                 esp_sip_dbg(ESP_DBG_TRACE, "com %d %d %d\n", b0, b10, b11);
515                                                                 if(b0 && !b10 && !b11) {
516                                                                         have_rxabort = false;
517                                                                         esp_sip_dbg(ESP_DBG_TRACE, "repair 0\n");
518                                                                 } else if(!b0 && b10 && !b11) {
519                                                                         skb_push(skb, 10);
520                                                                         memcpy(skb->data, frame_head, 10);
521                                                                         have_rxabort = false;
522                                                                         pulled_len -= 10;
523                                                                         esp_sip_dbg(ESP_DBG_TRACE, "repair 10\n");
524                                                                 } else if(!b0 && !b10 && b11) {
525                                                                         skb_push(skb, 11);
526                                                                         memcpy(skb->data, frame_head, 11);
527                                                                         have_rxabort = false;
528                                                                         pulled_len -= 11;
529                                                                         esp_sip_dbg(ESP_DBG_TRACE, "repair 11\n");
530                                                                 }
531                                                         }
532                                                 }
533                                         }
534                                         pkt_dropped++;
535                                         esp_sip_dbg(ESP_DBG_LOG, "%s ampdu dropped %d/%d\n", __func__, pkt_dropped, pkt_total);
536                                 }
537                                 pkt_num--;
538                                 ampdu_len++;
539                         }
540                         if(frame_buf_ttl)
541                                 frame_buf_ttl--;
542                         skb_pull(skb, hdr->len - pulled_len);
543                 } else {
544                         esp_sip_dbg(ESP_DBG_ERROR, "%s %d unknown type\n", __func__, __LINE__);
545                 }
546
547 _move_on:
548                 if (hdr->len < remains_len) {
549                         remains_len -= hdr->len;
550                 } else {
551                         break;
552                 }
553                 bufptr += hdr->len;
554         }
555
556 _exit:
557 #ifdef ESP_PREALLOC 
558         esp_put_sip_skb(&skb);
559 #else
560         kfree_skb(skb);
561 #endif
562
563 #ifdef RX_SYNC
564         if (trigger_rxq) {
565                 queue_work(sip->epub->esp_wkq, &sip->epub->sendup_work);
566         }
567         if (trigger_txq) {
568                 sip_trigger_txq_process(sip);
569         }
570 #else
571         return trigger_rxq;
572 #endif /* RX_SYNC */
573
574 #undef DO_NOT_COPY
575 #undef DO_COPY
576 }
577
578 #ifndef RX_SYNC
579 static void _sip_rxq_process(struct esp_sip *sip)
580 {
581         struct sk_buff *skb = NULL;
582         bool sendup = false;
583
584         while ((skb = skb_dequeue(&sip->rxq))) {
585                 if (sip_rx_pkt_process(sip, skb))
586                         sendup = true;
587         }
588 #ifndef RX_SENDUP_SYNC
589         if (sendup) {
590                 queue_work(sip->epub->esp_wkq, &sip->epub->sendup_work);
591         }
592 #endif /* !RX_SENDUP_SYNC */
593
594         /* probably tx_credit is updated, try txq */
595         sip_trigger_txq_process(sip);
596 }
597
598 void sip_rxq_process(struct work_struct *work)
599 {
600         struct esp_sip *sip = container_of(work, struct esp_sip, rx_process_work);
601         if (sip == NULL) {
602                 ESSERT(0);
603                 return;
604         }
605
606         if (unlikely(atomic_read(&sip->state) == SIP_SEND_INIT)) {
607                 sip_send_chip_init(sip);
608                 atomic_set(&sip->state, SIP_WAIT_BOOTUP);
609                 return;
610         }
611
612         mutex_lock(&sip->rx_mtx);
613         _sip_rxq_process(sip);
614         mutex_unlock(&sip->rx_mtx);
615 }
616
617 static inline void sip_rx_pkt_enqueue(struct esp_sip *sip, struct sk_buff *skb)
618 {
619         skb_queue_tail(&sip->rxq, skb);
620 }
621
622 static inline struct sk_buff * sip_rx_pkt_dequeue(struct esp_sip *sip) {
623         return skb_dequeue(&sip->rxq);
624 }
625 #endif /* RX_SYNC */
626
627 static u32 sip_rx_count = 0;
628 void sip_debug_show(struct esp_sip *sip)
629 {
630         esp_sip_dbg(ESP_DBG_ERROR, "txq left %d %d\n", skb_queue_len(&sip->epub->txq), atomic_read(&sip->tx_data_pkt_queued));
631         esp_sip_dbg(ESP_DBG_ERROR, "tx queues stop ? %d\n", atomic_read(&sip->epub->txq_stopped));
632         esp_sip_dbg(ESP_DBG_ERROR, "txq stop?  %d\n", test_bit(ESP_WL_FLAG_STOP_TXQ, &sip->epub->wl.flags));
633         esp_sip_dbg(ESP_DBG_ERROR, "tx credit %d\n", atomic_read(&sip->tx_credits));
634         esp_sip_dbg(ESP_DBG_ERROR, "rx collect %d\n", sip_rx_count);
635         sip_rx_count = 0;
636 }
637
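/*
 * sip_rx(): read one SIP aggregation buffer from the target.  First read a
 * small initial chunk (the lookahead length when LOOKAHEAD is set), validate
 * the leading sip_hdr, fetch the rest of the buffer if it is larger than the
 * first read, then hand the assembled skb to sip_rx_pkt_process() either via
 * the rx work queue or synchronously (RX_SYNC).
 */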
638 int sip_rx(struct esp_pub *epub)
639 {
640         struct sip_hdr *shdr = NULL;
641         struct esp_sip *sip = epub->sip;
642         int err = 0;
643         struct sk_buff *first_skb = NULL;
644 #ifndef LOOKAHEAD
645         struct sk_buff *next_skb = NULL;
646 #endif
647         u8 *rx_buf = NULL;
648         u32 rx_blksz;
649         struct sk_buff *rx_skb = NULL;
650
651         u32 first_sz; 
652
653 #ifdef LOOKAHEAD
654 #ifndef SIF_DSR_WAR
655         struct slc_host_regs *regs = sif_get_regs(epub);
656
657         memset(regs, 0x0, sizeof(struct slc_host_regs));
658         esp_common_read_with_addr(epub, REG_SLC_HOST_BASE + 8, (u8 *)regs, sizeof(struct slc_host_regs), ESP_SIF_SYNC);
659 #endif
660         first_sz = sif_get_regs(epub)->config_w0;
661 #else 
662         first_sz = 4;
663 #endif
664
665         esp_sip_dbg(ESP_DBG_LOG, "%s enter\n", __func__);
666
667
668         /* first read one block out; if we are lucky enough, that's it
669          *
670          *  To keep the design as simple as possible, we allocate skb(s)
671          *  separately for each sif read operation to avoid a global
672          *  read_buf_pointer access.  It could be optimized later.
673          */
674         rx_blksz = sif_get_blksz(epub);
675 #ifdef LOOKAHEAD
676 #ifdef ESP_PREALLOC
677         first_skb = esp_get_sip_skb(roundup(first_sz, rx_blksz));
678 #else 
679         first_skb = __dev_alloc_skb(roundup(first_sz, rx_blksz), GFP_KERNEL);
680 #endif /* ESP_PREALLOC */
681 #else
682 #ifdef ESP_PREALLOC
683         first_skb = esp_get_sip_skb(first_sz);
684 #else
685         first_skb = __dev_alloc_skb(first_sz, GFP_KERNEL);
686 #endif /* ESP_PREALLOC */
687 #endif /* LOOKAHEAD */
688
689         if (first_skb == NULL) {
690                 sif_unlock_bus(epub);
691                 esp_sip_dbg(ESP_DBG_ERROR, "%s first no memory \n", __func__);
692                 goto _err;
693         }
694
695         rx_buf = skb_put(first_skb, first_sz);
696         esp_sip_dbg(ESP_DBG_LOG, "%s rx_buf ptr %p, first_sz %d\n", __func__, rx_buf, first_sz);
697
698
699 #ifdef USE_EXT_GPIO
700         do{
701                 int err2 = 0;
702                 u16 value = 0;
703                 u16 intr_mask = ext_gpio_get_int_mask_reg();
704                 if(!intr_mask)
705                         break;
706                 value = sif_get_regs(epub)->config_w3 & intr_mask;
707                 if (value)
708                 {
709                         err2 = sif_interrupt_target(epub, 6);
710                         esp_sip_dbg(ESP_DBG, "write gpio\n");
711                 }
712
713                 if(!err2 && value) {
714                         esp_sip_dbg(ESP_DBG_TRACE, "%s intr_mask[0x%04x] value[0x%04x]\n", __func__, intr_mask, value);
715                         ext_gpio_int_process(value);
716                 }
717         }while(0);
718 #endif
719
720 #ifdef LOOKAHEAD
721 #ifdef ESP_ACK_INTERRUPT
722 #ifdef ESP_ACK_LATER
723                 err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, false);
724         sif_platform_ack_interrupt(epub);
725 #else
726         sif_platform_ack_interrupt(epub);
727                 err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, false);
728 #endif /* ESP_ACK_LATER */
729 #else
730         err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, false);
731 #endif //ESP_ACK_INTERRUPT
732 #else
733 #ifdef ESP_ACK_INTERRUPT
734 #ifdef ESP_ACK_LATER
735                 err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, true);
736         sif_platform_ack_interrupt(epub);
737 #else
738         sif_platform_ack_interrupt(epub);
739                 err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, true);
740 #endif /* ESP_ACK_LATER */
741 #else
742         err = esp_common_read(epub, rx_buf, first_sz, ESP_SIF_NOSYNC, true);
743 #endif //ESP_ACK_INTERRUPT
744 #endif //LOOKAHEAD 
745         sip_rx_count++;
746         if (unlikely(err)) {
747                 esp_dbg(ESP_DBG_ERROR, " %s first read err %d %d\n", __func__, err, sif_get_regs(epub)->config_w0);
748 #ifdef ESP_PREALLOC
749                 esp_put_sip_skb(&first_skb);
750 #else
751                 kfree_skb(first_skb);
752 #endif /* ESP_PREALLOC */
753                 sif_unlock_bus(epub);
754                 goto _err;
755         }
756
757         shdr = (struct sip_hdr *)rx_buf;
758         if(SIP_HDR_IS_CTRL(shdr) && (shdr->c_evtid == SIP_EVT_SLEEP)) {
759                 atomic_set(&sip->epub->ps.state, ESP_PM_ON);
760                 esp_dbg(ESP_DBG_TRACE, "s\n");
761         }
762
763         if ((shdr->len & 3) != 0){
764                 esp_sip_dbg(ESP_DBG_ERROR, "%s shdr->len[%d] error\n", __func__, shdr->len);
765 #ifdef ESP_PREALLOC
766                 esp_put_sip_skb(&first_skb);
767 #else
768                 kfree_skb(first_skb);
769 #endif /* ESP_PREALLOC */
770                 sif_unlock_bus(epub);
771                 err = -EIO;
772                 goto _err;
773         }
774
775 #ifdef LOOKAHEAD
776         if (shdr->len != first_sz){
777                 esp_sip_dbg(ESP_DBG_ERROR, "%s shdr->len[%d]  first_size[%d] error\n", __func__, shdr->len, first_sz);
778 #ifdef ESP_PREALLOC
779                 esp_put_sip_skb(&first_skb);
780 #else
781                 kfree_skb(first_skb);
782 #endif /* ESP_PREALLOC */
783                 sif_unlock_bus(epub);
784                 err = -EIO;
785                 goto _err;
786         }
787 #else
788         if (shdr->len > first_sz)  {
789                 /* larger than one blk, fetch the rest */
790 #ifdef ESP_PREALLOC
791                 next_skb = esp_get_sip_skb(roundup(shdr->len, rx_blksz) + first_sz);
792 #else
793                 next_skb = __dev_alloc_skb(roundup(shdr->len, rx_blksz) + first_sz, GFP_KERNEL);
794 #endif /* ESP_PREALLOC */
795
796                 if (unlikely(next_skb == NULL)) {
797                         sif_unlock_bus(epub);
798                         esp_sip_dbg(ESP_DBG_ERROR, "%s next no memory \n", __func__);
799 #ifdef ESP_PREALLOC
800                         esp_put_sip_skb(&first_skb);
801 #else
802                         kfree_skb(first_skb);
803 #endif /* ESP_PREALLOC */
804                         goto _err;
805                 }
806                 rx_buf = skb_put(next_skb, shdr->len);
807                 rx_buf += first_sz; /* skip the first block */
808
809                 err = esp_common_read(epub, rx_buf, (shdr->len - first_sz), ESP_SIF_NOSYNC, false);
810                 sif_unlock_bus(epub);
811
812                 if (unlikely(err)) {
813                         esp_sip_dbg(ESP_DBG_ERROR, "%s next read err %d \n", __func__, err);
814 #ifdef ESP_PREALLOC
815                         esp_put_sip_skb(&first_skb);
816                         esp_put_sip_skb(&next_skb);
817 #else
818                         kfree_skb(first_skb);
819                         kfree_skb(next_skb);
820 #endif /* ESP_PREALLOC */
821                         goto _err;
822                 }
823                 /* merge two skbs, TBD: could be optimized by skb_linearize*/
824                 memcpy(next_skb->data, first_skb->data, first_sz);
825                 esp_dbg(ESP_DBG_TRACE, " %s  next skb\n", __func__);
826
827                 rx_skb = next_skb;
828 #ifdef ESP_PREALLOC
829                 esp_put_sip_skb(&first_skb);
830 #else
831                 kfree_skb(first_skb);
832 #endif /* ESP_PREALLOC */
833         }
834 #endif 
835         else {
836                 sif_unlock_bus(epub);
837                 skb_trim(first_skb, shdr->len);
838                 esp_dbg(ESP_DBG_TRACE, " %s first_skb only\n", __func__);
839
840                 rx_skb = first_skb;
841         }
842         if (atomic_read(&sip->state) == SIP_STOP) {
843 #ifdef ESP_PREALLOC
844                 esp_put_sip_skb(&rx_skb);
845 #else
846                 kfree_skb(rx_skb);
847 #endif /* ESP_PREALLOC */
848                 esp_sip_dbg(ESP_DBG_ERROR, "%s when sip stopped\n", __func__);
849                 return 0;
850         }
851 #ifndef RX_SYNC
852         sip_rx_pkt_enqueue(sip, rx_skb);
853         queue_work(sip->epub->esp_wkq, &sip->rx_process_work);
854 #else
855         sip_rx_pkt_process_sync(sip, rx_skb);
856 #endif /* RX_SYNC */
857
858 _err:
859         return err;
860 }
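#if 0
/*
 * Hedged sketch, not part of the original driver: sip_rx() releases the SIF
 * bus lock itself on every path above, which suggests the caller is expected
 * to take it first.  A hypothetical caller might therefore look roughly like
 * this (example_handle_sdio_irq is an illustrative name, not a real symbol).
 */
static void example_handle_sdio_irq(struct esp_pub *epub)
{
        sif_lock_bus(epub);     /* sip_rx() unlocks the bus internally */
        sip_rx(epub);           /* drain one SIP aggregation buffer */
}
#endif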
861
862 int sip_get_raw_credits(struct esp_sip *sip)
863 {
864 #if 1
865         unsigned long timeout;
866         int err = 0;
867
868         esp_dbg(ESP_DBG_TRACE, "%s enter\n", __func__);
869
870         /* 1s timeout */
871         timeout = jiffies + msecs_to_jiffies(1000);
872
873         while (time_before(jiffies, timeout) && !sip->boot_credits) {
874
875                 err = esp_common_read_with_addr(sip->epub, SLC_HOST_TOKEN_RDATA, (u8 *)&sip->boot_credits, 4, ESP_SIF_SYNC);
876
877                 if (err) {
878                         esp_dbg(ESP_DBG_ERROR, "Can't read credits\n");
879                         return err;
880                 }
881                 sip->boot_credits &= SLC_HOST_TOKEN0_MASK;
882 #ifdef SIP_DEBUG
883                 if (sip->boot_credits == 0) {
884                         esp_dbg(ESP_DBG_ERROR, "no credit, try again\n");
885                         mdelay(50);
886                 }
887 #endif /* SIP_DEBUG */
888         }
889
890         if (!sip->boot_credits) {
891                 esp_dbg(ESP_DBG_ERROR, "read credits timeout\n");
892                 return -ETIMEDOUT;
893         }
894
895         esp_dbg(ESP_DBG_TRACE, "%s got credits: %d\n", __func__, sip->boot_credits);
896 #endif //0000
897
898         return 0;
899 }
900
901
902 /* Only cooperate with get_raw_credits */
903 static void
904 sip_dec_credit(struct esp_sip *sip)
905 {
906 #if 0
907         u32 reg = 0;
908         int err = 0;
909
910         reg = SLC_HOST_TOKEN0_WR | SLC_HOST_TOKEN0_DEC;
911         memcpy(sip->rawbuf, &reg, sizeof(u32));
912         err = sif_io_sync(sip->epub, SLC_HOST_INT_CLR, sip->rawbuf, sizeof(u32), SIF_TO_DEVICE | SIF_SYNC | SIF_BYTE_BASIS | SIF_INC_ADDR);
913
914         if (err)
915                 esp_dbg(ESP_DBG_ERROR, "%s can't clear target token0 \n", __func__);
916
917 #endif //0000
918         /* SLC 2.0, token reg is read-to-clear, thus no need to access target */
919         sip->boot_credits--;
920 }
921
922 int sip_post_init(struct esp_sip *sip, struct sip_evt_bootup2 *bevt)
923 {
924         struct esp_pub *epub;
925 #ifndef ESP_PREALLOC
926         int po = 0;
927 #endif
928
929         if (sip == NULL) {
930                 ESSERT(0);
931                 return -EINVAL;
932         }
933
934         epub = sip->epub;
935
936 #ifdef ESP_PREALLOC
937         sip->tx_aggr_buf = (u8 *)esp_get_tx_aggr_buf();
938 #else
939         po = get_order(SIP_TX_AGGR_BUF_SIZE);
940         sip->tx_aggr_buf = (u8 *)__get_free_pages(GFP_ATOMIC, po);
941 #endif
942         if (sip->tx_aggr_buf == NULL) {
943                 esp_dbg(ESP_DBG_ERROR, "no mem for tx_aggr_buf! \n");
944                 return -ENOMEM;
945         }
946
947         sip->tx_aggr_write_ptr = sip->tx_aggr_buf;
948
949         sip->tx_blksz = bevt->tx_blksz;
950         sip->rx_blksz = bevt->rx_blksz;
951         sip->credit_to_reserve = bevt->credit_to_reserve;
952
953         sip->dump_rpbm_err = (bevt->options & SIP_DUMP_RPBM_ERR);
954         sip->rxabort_fixed = (bevt->options & SIP_RXABORT_FIXED);
955         sip->support_bgscan = (bevt->options & SIP_SUPPORT_BGSCAN);
956
957         sip->sendup_rpbm_pkt = sip->dump_rpbm_err && false;
958
959         /* print out MAC addr... */
960         memcpy(epub->mac_addr, bevt->mac_addr, ETH_ALEN);
961         atomic_set(&sip->noise_floor, bevt->noise_floor);
962
963         esp_sip_dbg(ESP_DBG_TRACE, "%s tx_blksz %d rx_blksz %d mac addr %pM\n", __func__, sip->tx_blksz, sip->rx_blksz, epub->mac_addr);
964
965         return 0;
966 }
967
968 /* write pkts in aggr buf to target memory */
969 static void sip_write_pkts(struct esp_sip *sip, int pm_state)
970 {
971         int tx_aggr_len = 0;
972         struct sip_hdr *first_shdr = NULL;
973         int err = 0;
974
975         tx_aggr_len = sip->tx_aggr_write_ptr - sip->tx_aggr_buf;
976         if (tx_aggr_len < sizeof(struct sip_hdr)) {
977                 printk("%s tx_aggr_len %d \n", __func__, tx_aggr_len);
978                 ESSERT(0);
979                 return;
980         }
981         if ((tx_aggr_len & 0x3) != 0) {
982                 ESSERT(0);
983                 return;
984         }
985
986         first_shdr = (struct sip_hdr *)sip->tx_aggr_buf;
987
988         if (atomic_read(&sip->tx_credits) <= SIP_CREDITS_LOW_THRESHOLD) {
989                 first_shdr->fc[1] |= SIP_HDR_F_NEED_CRDT_RPT;
990         }
991
992         /* still use lock bus instead of sif_lldesc_write_sync since we want to protect several global variable assignments */
993         sif_lock_bus(sip->epub);
994
995         sif_raw_dummy_read(sip->epub,0);
996
997         err = esp_common_write(sip->epub, sip->tx_aggr_buf, tx_aggr_len, ESP_SIF_NOSYNC);
998
999         sip->tx_aggr_write_ptr = sip->tx_aggr_buf;
1000         sip->tx_tot_len = 0;
1001
1002         sif_unlock_bus(sip->epub);
1003
1004         if (err)
1005                 esp_sip_dbg(ESP_DBG_ERROR, "func %s err!!!!!!!!!: %d\n", __func__, err);
1006
1007 }
1008
1009 /* setup sip header and tx info, copy pkt into aggr buf */
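/*
 * Layout of one entry in the tx aggregation buffer built below (padded up to
 * sip->tx_blksz):
 *   [struct sip_hdr][struct sip_tx_rc (HOST_RC only)]
 *   [struct esp_tx_ampdu_entry (AMPDU frames only)][802.11 frame from the skb]
 * shdr->len records the unpadded length; control frames already carry their
 * own sip_hdr and are copied verbatim.
 */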
1010 static int sip_pack_pkt(struct esp_sip *sip, struct sk_buff *skb, int *pm_state)
1011 {
1012         struct ieee80211_tx_info *itx_info;
1013         struct sip_hdr *shdr;
1014         u32 tx_len = 0, offset = 0;
1015         bool is_data = true;
1016
1017         itx_info = IEEE80211_SKB_CB(skb);
1018
1019         if (itx_info->flags == 0xffffffff) {
1020                 shdr = (struct sip_hdr *)skb->data;
1021                 is_data = false;
1022                 tx_len = skb->len;
1023         } else {
1024                 struct ieee80211_hdr * wh = (struct ieee80211_hdr *)skb->data;
1025                 struct esp_vif *evif = (struct esp_vif *)itx_info->control.vif->drv_priv;
1026                 u8 sta_index;
1027                 struct esp_node *node;  
1028                 /* update sip header */
1029                 shdr = (struct sip_hdr *)sip->tx_aggr_write_ptr;
1030                 
1031                 shdr->fc[0] = 0;
1032                 shdr->fc[1] = 0;
1033
1034                 if ((itx_info->flags & IEEE80211_TX_CTL_AMPDU) && (true || esp_is_ip_pkt(skb)))
1035                         SIP_HDR_SET_TYPE(shdr->fc[0], SIP_DATA_AMPDU);
1036                 else
1037                         SIP_HDR_SET_TYPE(shdr->fc[0], SIP_DATA);
1038
1039                 if(evif->epub == NULL){
1040 #ifndef FAST_TX_STATUS
1041                         /* TBD */
1042 #else
1043                         sip_tx_status_report(sip, skb, itx_info, false);
1044                         atomic_dec(&sip->tx_data_pkt_queued);
1045                         return -EINVAL;
1046 #endif /* FAST_TX_STATUS */
1047                 }
1048
1049                 /* make room for encrypted pkt */
1050                 if (itx_info->control.hw_key) {
1051 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
1052                         shdr->d_enc_flag= itx_info->control.hw_key->alg+1;
1053 #else
1054                         int alg = esp_cipher2alg(itx_info->control.hw_key->cipher);
1055                         if (unlikely(alg == -1)) {
1056 #ifndef FAST_TX_STATUS
1057                                 /* TBD */
1058 #else
1059                                 sip_tx_status_report(sip, skb, itx_info, false);
1060                                 atomic_dec(&sip->tx_data_pkt_queued);
1061                                 return -1;
1062 #endif /* FAST_TX_STATUS */
1063                         } else {
1064                                 shdr->d_enc_flag = alg + 1;
1065                         }
1066
1067 #endif /* NEW_KERNEL */
1068                          shdr->d_hw_kid =  itx_info->control.hw_key->hw_key_idx | (evif->index<<7);
1069                 } else {
1070                         shdr->d_enc_flag=0;
1071                         shdr->d_hw_kid = (evif->index << 7 | evif->index);
1072                 }
1073
1074                 /* update sip tx info */
1075 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
1076                 node = esp_get_node_by_addr(sip->epub, wh->addr1);
1077 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
1078                 if(itx_info->control.sta == NULL){
1079                         node = NULL;
1080                 } else {
1081                         node = esp_get_node_by_addr(sip->epub, itx_info->control.sta->addr);
1082                 }
1083 #else
1084                 
1085                 node = esp_get_node_by_addr(sip->epub, wh->addr1);
1086 #endif
1087                 if(node != NULL)
1088                         sta_index = node->index;
1089                 else
1090                         sta_index = ESP_PUB_MAX_STA + 1;
1091                 SIP_HDR_SET_IFIDX(shdr->fc[0], evif->index << 3 | sta_index);
1092 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
1093                 shdr->d_p2p = itx_info->control.vif->p2p;
1094                 if(evif->index == 1)
1095                         shdr->d_p2p = 1;
1096 #endif
1097                 shdr->d_ac = skb_get_queue_mapping(skb);
1098                 shdr->d_tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
1099                 wh = (struct ieee80211_hdr *)skb->data;
1100                 if (ieee80211_is_mgmt(wh->frame_control)) {
1101                 /* addba/delba/bar may use different tid/ac */
1102                         if (shdr->d_ac == WME_AC_VO) {
1103                                 shdr->d_tid = 7;
1104                         }
1105                         if (ieee80211_is_beacon(wh->frame_control)) {
1106                                 shdr->d_tid = 8;
1107                                 shdr->d_ac = 4;
1108                         }
1109                 }
1110 //#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
1111                 if (check_ac_tid(skb->data, shdr->d_ac, shdr->d_tid)) {
1112                         shdr->d_ac = WME_AC_BE;
1113                         shdr->d_tid = 0;
1114                 }
1115 //#endif  /* NEW_KERNEL || KERNEL_35 */
1116
1117
1118                 /* make sure data is start at 4 bytes aligned addr. */
1119                 offset = roundup(sizeof(struct sip_hdr), 4);
1120
1121 #ifdef HOST_RC
1122                 esp_sip_dbg(ESP_DBG_TRACE, "%s offset0 %d \n", __func__, offset);
1123                 memcpy(sip->tx_aggr_write_ptr + offset, (void *)&itx_info->control,
1124                        sizeof(struct sip_tx_rc));
1125
1126                 offset += roundup(sizeof(struct sip_tx_rc), 4);
1127                 esp_show_tx_rates(&itx_info->control.rates[0]);
1128
1129 #endif /* HOST_RC */
1130
1131                 if (SIP_HDR_IS_AMPDU(shdr)) {
1132                         memset(sip->tx_aggr_write_ptr + offset, 0, sizeof(struct esp_tx_ampdu_entry));
1133                         offset += roundup(sizeof(struct esp_tx_ampdu_entry), 4);
1134                 }
1135
1136                 tx_len = offset + skb->len;
1137                 shdr->len = tx_len;  /* actual len */
1138
1139                 esp_sip_dbg(ESP_DBG_TRACE, "%s offset %d skblen %d txlen %d\n", __func__, offset, skb->len, tx_len);
1140
1141         }
1142
1143         shdr->seq = sip->txseq++;
1144         //esp_sip_dbg(ESP_DBG_ERROR, "%s seq %u, %u %u\n", __func__, shdr->seq, SIP_HDR_GET_TYPE(shdr->fc[0]),shdr->c_cmdid);
1145
1146         /* copy skb to aggr buf */
1147         memcpy(sip->tx_aggr_write_ptr + offset, skb->data, skb->len);
1148
1149         if (is_data) {
1150                         spin_lock_bh(&sip->epub->tx_lock);
1151                         sip->txdataseq = shdr->seq;
1152                         spin_unlock_bh(&sip->epub->tx_lock);
1153 #ifndef FAST_TX_STATUS
1154                 /* store seq in driver data, need seq to pick pkt during tx status report */
1155                 *(u32 *)itx_info->driver_data = shdr->seq;
1156                 atomic_inc(&sip->pending_tx_status);
1157 #else
1158                 /* fake a tx_status and report to mac80211 stack to speed up tx, may affect
1159                  *  1) rate control (now it's all in target, so should be OK)
1160                  *  2) ps mode, mac80211 want to check ACK of ps/nulldata to see if AP is awake
1161                  *  3) BAR, mac80211 do BAR by checking ACK
1162                  */
1163                 /*
1164                  *  XXX: need to adjust for 11n, e.g. report tx_status according to BA received in target
1165                  *
1166                  */
1167                 sip_tx_status_report(sip, skb, itx_info, true);
1168                 atomic_dec(&sip->tx_data_pkt_queued);
1169
1170 #endif /* FAST_TX_STATUS */
1171                 STRACE_TX_DATA_INC();
1172         } else {
1173                 /* check pm state here */
1174
1175                /* no need to hold ctrl skb */
1176                 sip_free_ctrl_skbuff(sip, skb);
1177                 STRACE_TX_CMD_INC();
1178         }
1179
1180         /* TBD: roundup here or whole aggr-buf */
1181         tx_len = roundup(tx_len, sip->tx_blksz);
1182
1183         sip->tx_aggr_write_ptr += tx_len;
1184         sip->tx_tot_len += tx_len;
1185
1186         return 0;
1187 }
1188
1189 #ifndef FAST_TX_STATUS
1190 static void
1191 sip_after_tx_status_update(struct esp_sip *sip)
1192 {
1193         if (atomic_read(&sip->data_tx_stopped) == true && sip_tx_data_may_resume(sip)) {
1194                 atomic_set(&sip->data_tx_stopped, false);
1195                 if (sip_is_tx_mblk_avail(sip) == false) {
1196                         esp_sip_dbg(ESP_DBG_ERROR, "%s mblk still unavail \n", __func__);
1197                 } else {
1198                         esp_sip_dbg(ESP_DBG_TRACE, "%s trigger txq \n", __func__);
1199                         sip_trigger_txq_process(sip);
1200                 }
1201         } else if (!sip_tx_data_may_resume(sip)) { //JLU: this is redundant
1202                 STRACE_SHOW(sip);
1203         }
1204 }
1205 #endif /* !FAST_TX_STATUS */
1206
1207 #ifdef HOST_RC
1208 static void sip_set_tx_rate_status(struct sip_rc_status *rcstatus, struct ieee80211_tx_rate *irates)
1209 {
1210         int i;
1211         u8 shift = 0;
1212         u32 cnt = 0;
1213
1214         for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
1215                 if (rcstatus->rc_map & BIT(i)) {
1216                         shift = i << 2;
1217                         cnt = (rcstatus->rc_cnt_store >> shift) & RC_CNT_MASK;
1218                         irates[i].idx = i;
1219                         irates[i].count = (u8)cnt;
1220                 } else {
1221                         irates[i].idx = -1;
1222                         irates[i].count = 0;
1223                 }
1224         }
1225
1226         esp_show_rcstatus(rcstatus);
1227         esp_show_tx_rates(irates);
1228 }
1229 #endif /* HOST_RC */
1230
1231 #ifndef FAST_TX_STATUS
1232 static void
1233 sip_txdoneq_process(struct esp_sip *sip, struct sip_evt_tx_report *tx_report)
1234 {
1235         struct sk_buff *skb, *tmp;
1236         struct esp_pub *epub = sip->epub;
1237         int matchs = 0;
1238         struct ieee80211_tx_info *tx_info;
1239         struct sip_tx_status *tx_status;
1240         int i;
1241
1242         esp_sip_dbg(ESP_DBG_LOG, "%s enter, report->pkts %d, pending tx_status %d\n", __func__, tx_report->pkts, atomic_read(&sip->pending_tx_status));
1243
1244         /* traversal the txdone queue, find out matched skb by seq, hand over
1245          * to up layer stack
1246          */
1247         for (i = 0; i < tx_report->pkts; i++) {
1248                 //esp_sip_dbg(ESP_DBG_TRACE, "%s status %d seq %u\n", __func__, i, tx_report->status[i].sip_seq);
1249                 skb_queue_walk_safe(&epub->txdoneq, skb, tmp) {
1250                         tx_info = IEEE80211_SKB_CB(skb);
1251
1252                         //esp_sip_dbg(ESP_DBG_TRACE, "%s skb seq %u\n", __func__, *(u32 *)tx_info->driver_data);
1253                         if (tx_report->status[i].sip_seq == *(u32 *)tx_info->driver_data) {
1254                                 tx_status = &tx_report->status[i];
1255                                 __skb_unlink(skb, &epub->txdoneq);
1256
1257                                 //fill up ieee80211_tx_info
1258                                 //TBD: lock ??
1259                                 if (tx_status->errno == SIP_TX_ST_OK &&
1260                                     !(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1261                                         tx_info->flags |= IEEE80211_TX_STAT_ACK;
1262                                 }
1263 #ifdef HOST_RC
1264                                 sip_set_tx_rate_status(&tx_report->status[i].rcstatus, &tx_info->status.rates[0]);
1265                                 esp_sip_dbg(ESP_DBG_TRACE, "%s idx0 %d, cnt0 %d, flags0 0x%02x\n", __func__, tx_info->status.rates[0].idx,tx_info->status.rates[0].count, tx_info->status.rates[0].flags);
1266
1267 #else
1268                                 /* manipulate rate status... */
1269                                 tx_info->status.rates[0].idx = 0;
1270                                 tx_info->status.rates[0].count = 1;
1271                                 tx_info->status.rates[0].flags = 0;
1272                                 tx_info->status.rates[1].idx = -1;
1273 #endif /* HOST_RC */
1274
1275                                 ieee80211_tx_status(epub->hw, skb);
1276                                 matchs++;
1277                                 atomic_dec(&sip->pending_tx_status);
1278                                 STRACE_RX_TXSTATUS_INC();
1279                         }
1280                 }
1281         }
1282
1283         if (matchs < tx_report->pkts) {
1284                 esp_sip_dbg(ESP_DBG_ERROR, "%s tx report mismatch! \n", __func__);
1285         } else {
1286                 //esp_sip_dbg(ESP_DBG_TRACE, "%s tx report %d pkts! \n", __func__, matchs);
1287         }
1288
1289         sip_after_tx_status_update(sip);
1290 }
1291 #else
1292 #ifndef FAST_TX_NOWAIT
1293
1294 static void
1295 sip_txdoneq_process(struct esp_sip *sip)
1296 {
1297         struct esp_pub *epub = sip->epub;
1298         struct sk_buff *skb;
1299         while ((skb = skb_dequeue(&epub->txdoneq))) {
1300                 ieee80211_tx_status(epub->hw, skb);
1301         }
1302 }
1303 #endif
1304 #endif /* !FAST_TX_STATUS */
1305
1306 #ifdef FAST_TX_STATUS
1307 static void sip_tx_status_report(struct esp_sip *sip, struct sk_buff *skb, struct ieee80211_tx_info *tx_info, bool success)
1308 {
1309         if(!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1310                 if (likely(success))
1311                         tx_info->flags |= IEEE80211_TX_STAT_ACK;
1312                 else
1313                         tx_info->flags &= ~IEEE80211_TX_STAT_ACK;
1314
1315 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
1316                 /* manipulate rate status... */
1317                 tx_info->status.rates[0].idx = 11;
1318                 tx_info->status.rates[0].count = 1;
1319                 tx_info->status.rates[0].flags = 0;
1320                 tx_info->status.rates[1].idx = -1;
1321 #else
1322                 tx_info->status.retry_count = 1;
1323                 tx_info->status.excessive_retries = false;
1324 #endif
1325
1326         } else {
1327                 tx_info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_ACK;
1328 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
1329                 tx_info->status.ampdu_ack_map = 1;
1330 #else
1331                 tx_info->status.ampdu_len = 1;
1332 #endif
1333                 tx_info->status.ampdu_ack_len = 1;
1334
1335 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
1336                 /* manipulate rate status... */
1337                 tx_info->status.rates[0].idx = 7;
1338                 tx_info->status.rates[0].count = 1;
1339                 tx_info->status.rates[0].flags = IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_SHORT_GI;
1340                 tx_info->status.rates[1].idx = -1;
1341 #else
1342                 tx_info->status.retry_count = 1;
1343                 tx_info->status.excessive_retries = false;
1344 #endif
1345
1346         }
1347
1348         if(tx_info->flags & IEEE80211_TX_STAT_AMPDU)
1349                 esp_sip_dbg(ESP_DBG_TRACE, "%s ampdu status! \n", __func__);
1350
1351         if (!mod_support_no_txampdu() &&
1352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
1353                 cfg80211_get_chandef_type(&sip->epub->hw->conf.chandef) != NL80211_CHAN_NO_HT
1354 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
1355                 sip->epub->hw->conf.channel_type != NL80211_CHAN_NO_HT
1356 #else
1357                 !(sip->epub->hw->conf.flags&IEEE80211_CONF_SUPPORT_HT_MODE)
1358 #endif
1359                 ) {
1360                 struct ieee80211_tx_info * tx_info = IEEE80211_SKB_CB(skb);
1361                 struct ieee80211_hdr * wh = (struct ieee80211_hdr *)skb->data;
1362                 if(ieee80211_is_data_qos(wh->frame_control)) {
1363                         if(!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1364                                 u8 tidno = ieee80211_get_qos_ctl(wh)[0] & IEEE80211_QOS_CTL_TID_MASK;
1365                                 struct esp_node * node;
1366                                 struct esp_tx_tid *tid;
1367 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) 
1368                                 struct ieee80211_sta *sta;
1369
1370                                 node = esp_get_node_by_addr(sip->epub, wh->addr1);
1371                                 if(node == NULL)
1372                                         goto _exit;
1373                                 if(node->sta == NULL)
1374                                         goto _exit;
1375                                 sta = node->sta;
1376 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
1377                                 struct ieee80211_sta *sta;
1378                                 sta = tx_info->control.sta;
1379                                 if(sta == NULL)
1380                                         goto _exit;
1381                                 node = (struct esp_node *)sta->drv_priv;
1382                                 if(node == NULL){
1383                                         ESSERT(0);
1384                                         goto _exit;
1385                                 }
1386                                 if(node->sta == NULL)
1387                                         goto _exit;
1388                                 if(!sta->ht_cap.ht_supported)
1389                                         goto _exit;
1390 #else
1391                                 node = esp_get_node_by_addr(sip->epub, wh->addr1);
1392                                 if(node == NULL)
1393                                         goto _exit;
1394                                 if(!node->ht_info.ht_supported)
1395                                         goto _exit;
1396 #endif
1397                                 tid = &node->tid[tidno];
1398                                 spin_lock_bh(&sip->epub->tx_ampdu_lock);
1399                                 //start session
1400                                 if(tid == NULL){
1401                                         spin_unlock_bh(&sip->epub->tx_ampdu_lock);
1402                                         ESSERT(0);
1403                                         goto _exit;
1404                                 }
1405                                 if ((tid->state == ESP_TID_STATE_INIT) && 
1406                                                 (TID_TO_AC(tidno) != WME_AC_VO) && tid->cnt >= 10) {
1407                                         tid->state = ESP_TID_STATE_TRIGGER;
1408                                         esp_sip_dbg(ESP_DBG_ERROR, "start tx ba session,addr:%pM,tid:%u\n", wh->addr1, tidno);
1409                                         spin_unlock_bh(&sip->epub->tx_ampdu_lock);
1410 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
1411                                         ieee80211_start_tx_ba_session(sip->epub->hw, wh->addr1, tidno);
1412 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
1413                                         ieee80211_start_tx_ba_session(sip->epub->hw, sta->addr, tidno);
1414 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 37))
1415                                         ieee80211_start_tx_ba_session(sta, tidno);
1416 #else
1417                                         ieee80211_start_tx_ba_session(sta, tidno, 0);
1418 #endif
1419                                 } else {
1420                                         if(tid->state == ESP_TID_STATE_INIT)
1421                                                 tid->cnt++;
1422                                         else
1423                                                 tid->cnt = 0;
1424                                         spin_unlock_bh(&sip->epub->tx_ampdu_lock);
1425                                 }
1426                         }
1427                 }
1428         }
1429 _exit:
1430 #ifndef FAST_TX_NOWAIT 
1431         skb_queue_tail(&sip->epub->txdoneq, skb);
1432 #else
1433         ieee80211_tx_status(sip->epub->hw, skb);
1434 #endif
1435 }
1436 #endif /* FAST_TX_STATUS */
1437
1438 /*
1439  *  NB: the caller must hold the appropriate lock when calling this routine
1440  */
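/*
 * A rough sketch of the flow below: frames are pulled off epub->txq, their
 * on-the-wire size (sip_hdr, optional AMPDU entry, padding) is rounded up to
 * whole tx_blksz blocks, and each block costs one tx credit.  We stop early
 * if the target is out of credits or the aggregation buffer would overflow,
 * pack each frame with sip_pack_pkt(), and finally push the batch out with
 * sip_write_pkts().  For illustration only: with a hypothetical tx_blksz of
 * 512, a 1400-byte data frame plus headers would round up to 3 blocks and
 * consume 3 credits.
 */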
1441 void
1442 sip_txq_process(struct esp_pub *epub)
1443 {
1444         struct sk_buff *skb;
1445         struct esp_sip *sip = epub->sip;
1446         u32 pkt_len = 0, tx_len = 0, blknum = 0;
1447         bool queued_back = false;
1448         bool out_of_credits = false;
1449         struct ieee80211_tx_info *itx_info;
1450         int pm_state = 0;
1451         
1452         while ((skb = skb_dequeue(&epub->txq))) {
1453
1454                 /* for cmd skbs, skb->len already covers the sip_hdr; only data skbs get it added below */
1455                 pkt_len = skb->len;
1456                 itx_info = IEEE80211_SKB_CB(skb);
1457                 if (itx_info->flags != 0xffffffff) {
1458                         pkt_len += roundup(sizeof(struct sip_hdr), 4);
1459                         if ((itx_info->flags & IEEE80211_TX_CTL_AMPDU) && (true || esp_is_ip_pkt(skb)))
1460                                 pkt_len += roundup(sizeof(struct esp_tx_ampdu_entry), 4);
1461                 }
1462
1463                 /* The current design simply requires every sip_hdr to sit at the beginning of an mblk, which
1464                  * definitely needs optimizing: e.g. calculate the remaining length in the previous mblk and,
1465                  * if it exceeds a threshold (e.g. the whole pkt, > 50% of the pkt, or 2 x sizeof(struct sip_hdr)),
1466                  * append the pkt to the previous mblk.  This might be done in sip_pack_pkt().
1467                  */
1468                 pkt_len = roundup(pkt_len, sip->tx_blksz);
1469                 blknum = pkt_len / sip->tx_blksz;
1470                 esp_dbg(ESP_DBG_TRACE, "%s skb_len %d pkt_len %d blknum %d\n", __func__, skb->len, pkt_len, blknum);
1471
1472                 if (unlikely(blknum > atomic_read(&sip->tx_credits) - sip->credit_to_reserve)) {
1473                         esp_dbg(ESP_DBG_TRACE, "%s out of credits!\n", __func__);
1474                         STRACE_TX_OUT_OF_CREDIT_INC();
1475                         queued_back = true;
1476                         out_of_credits = true;
1477                         /*
1478                         if (epub->hw) {
1479                                 ieee80211_stop_queues(epub->hw);
1480                                 atomic_set(&epub->txq_stopped, true);
1481                         }
1482                         */
1483                         /* we will be back */
1484                         break;
1485                 }
1486
1487                 tx_len += pkt_len;
1488                 if (tx_len >= SIP_TX_AGGR_BUF_SIZE) {
1489                         /* do we need a limitation, like max 8 pkts in a row? */
1490                         esp_dbg(ESP_DBG_TRACE, "%s too much pkts in one shot!\n", __func__);
1491                         STRACE_TX_ONE_SHOT_INC();
1492                         tx_len -= pkt_len;
1493                         queued_back = true;
1494                         break;
1495                 }
1496
1497                 if (sip_pack_pkt(sip, skb, &pm_state) != 0) {
1498                         /* wrong pkt, won't send to target */
1499                         tx_len -= pkt_len;
1500                         continue;
1501                 }
1502
1503                 esp_sip_dbg(ESP_DBG_TRACE, "%s:before sub, credits is %d\n", __func__, atomic_read(&sip->tx_credits));
1504                 atomic_sub(blknum, &sip->tx_credits);
1505                 esp_sip_dbg(ESP_DBG_TRACE, "%s:after sub %d,credits remains %d\n", __func__, blknum, atomic_read(&sip->tx_credits));
1506
1507         }
1508
1509         if (queued_back) {
1510                 skb_queue_head(&epub->txq, skb);
1511         }
1512
1513         if (atomic_read(&sip->state) == SIP_STOP 
1514 #ifdef HOST_RESET_BUG
1515                 || atomic_read(&epub->wl.off) == 1
1516 #endif
1517                 )
1518         {
1519                 queued_back = 1;
1520                 tx_len = 0;
1521                 sip_after_write_pkts(sip);
1522         }
1523
1524         if (tx_len) {
1525         
1526                 sip_write_pkts(sip, pm_state);
1527
1528                 sip_after_write_pkts(sip);
1529         }
1530
1531         if (queued_back && !out_of_credits) {
1532
1533                 /* skb pending, do async process again */
1534                 //      if (!skb_queue_empty(&epub->txq))
1535                 sip_trigger_txq_process(sip);
1536         }
1537 }
1538
1539 static void sip_after_write_pkts(struct esp_sip *sip)
1540 {
1541         //enable txq
1542 #if 0
1543         if (atomic_read(&sip->epub->txq_stopped) == true && sip_queue_may_resume(sip)) {
1544                 atomic_set(&sip->epub->txq_stopped, false);
1545                 ieee80211_wake_queues(sip->epub->hw);
1546                 esp_sip_dbg(ESP_DBG_TRACE, "%s resume ieee80211 tx \n", __func__);
1547         }
1548 #endif
1549
1550 #ifndef FAST_TX_NOWAIT
1551         sip_txdoneq_process(sip);
1552 #endif
1553         //disable tx_data
1554 #ifndef FAST_TX_STATUS
1555         if (atomic_read(&sip->data_tx_stopped) == false && sip_tx_data_need_stop(sip)) {
1556                 esp_sip_dbg(ESP_DBG_TRACE, "%s data_tx_stopped \n", __func__);
1557                 atomic_set(&sip->data_tx_stopped, true);
1558         }
1559 #endif /* FAST_TX_STATUS */
1560 }
1561
1562 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
1563 /*
1564  * Old mac80211 (2.6.32.x) requires the payload to be 4-byte aligned, hence this hack.
1565  * TBD: the latest mac80211 stack does not need this; we may
1566  * need to check the kernel version here...
1567  */
1568 static void sip_check_skb_alignment(struct sk_buff *skb)
1569 {
1570         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1571         int hdrlen;
1572
1573         hdrlen = ieee80211_hdrlen(hdr->frame_control);
1574
1575 #if 0
1576
1577         /* TBD */
1578         if (rx->flags & IEEE80211_RX_AMSDU)
1579                 hdrlen += ETH_HLEN;
1580
1581 #endif
1582
1583         if (unlikely(((unsigned long)(skb->data + hdrlen)) & 3)) {
1584
1585                 esp_sip_dbg(ESP_DBG_TRACE, "%s adjust skb data position\n", __func__);
1586                 skb_push(skb, 2);
1587                 memmove(skb->data, skb->data+2, skb->len-2);
1588                 skb_trim(skb, skb->len-2);
1589         }
1590 }
1591 #endif /* !NEW_KERNEL */
1592
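/*
 * Walk the information elements of a beacon/probe response and compare the
 * channel announced in the DS Parameter Set against the channel the frame
 * was actually received on.  Returns 1 on mismatch, 0 on match and -1 if the
 * IEs cannot be parsed; callers use a non-zero result to drop frames that
 * leaked in from a neighbouring channel.
 */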
1593 int sip_channel_value_inconsistency(u8 *start, size_t len, unsigned channel)
1594 {
1595         size_t left = len;
1596         u8 *pos = start;
1597         u8 *DS_Param = NULL;
1598         u8 channel_parsed = 0xff;
1599         bool found = false;
1600         u8 ssid[33];
1601
1602         while(left >=2 && !found) {
1603                 u8 id, elen;
1604                 id = *pos++;
1605                 elen = *pos++;
1606                 left -= 2;
1607
1608                 if(elen > left)
1609                         break;
1610
1611                 switch (id) {
1612                 case WLAN_EID_SSID:
1613                         if (elen >= 33) {
1614                                 esp_dbg(ESP_DBG_ERROR, "SSID too long\n");
1615                                 //show_buf(start-36, 256);
1616                                 return -1;
1617                         }
1618                         memcpy(ssid, pos, elen);
1619                         ssid[elen] = 0;
1620                         esp_sip_dbg(ESP_DBG_TRACE, "ssid:%s\n", ssid);
1621                         break;
1622                 case WLAN_EID_SUPP_RATES:
1623                         break;
1624                 case WLAN_EID_FH_PARAMS:
1625                         break;
1626                 case WLAN_EID_DS_PARAMS:
1627                         DS_Param = pos;
1628                         found = true;
1629                         break;
1630                 default:
1631                         break;
1632                 }
1633
1634                 left -= elen;
1635                 pos += elen;
1636         }
1637
1638         if (DS_Param) {
1639                 channel_parsed = DS_Param[0];
1640         } else {
1641                 esp_dbg(ESP_DBG_ERROR, "DS_Param not found\n");
1642                 //show_buf(start-36, 256);
1643                 return -1;
1644         }
1645
1646         return channel_parsed != channel;
1647 }
1648
1649 /* parse mac_rx_ctrl and fill in the mac80211 rx_status for this frame; returns 0 on success, -1 if the frame should be dropped */
1650 static int sip_parse_mac_rx_info(struct esp_sip *sip, struct esp_mac_rx_ctrl * mac_ctrl, struct sk_buff *skb)
1651 {
1652         struct ieee80211_rx_status *rx_status = NULL;
1653         struct ieee80211_hdr *hdr;
1654
1655 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
1656         rx_status = IEEE80211_SKB_RXCB(skb);
1657 #else
1658         rx_status = (struct ieee80211_rx_status *)skb->cb;
1659 #endif
1660         rx_status->freq = esp_ieee2mhz(mac_ctrl->channel);
1661
1662 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
1663         rx_status->signal = mac_ctrl->rssi + mac_ctrl->noise_floor;  /* snr actually, need to offset noise floor e.g. -85 */
1664 #else
1665         rx_status->signal = mac_ctrl->rssi;  /* snr actually, need to offset noise floor e.g. -85 */
1666 #endif /* NEW_KERNEL */
1667
1668         hdr = (struct ieee80211_hdr *)skb->data;
1669         if (mac_ctrl->damatch0 == 1 && mac_ctrl->bssidmatch0 == 1        /* bssid and da match, but beacon frames may carry another bssid */
1670                          && memcmp(hdr->addr2, sip->epub->wl.bssid, ETH_ALEN) == 0) { /* force a match on addr2; memcmp, since a MAC address may contain 0x00 octets */
1671                 if (++signal_loop >= SIGNAL_COUNT) {
1672                         avg_signal += rx_status->signal;
1673                         avg_signal /= SIGNAL_COUNT;
1674                         old_signal = rx_status->signal = (avg_signal + 5);
1675                         signal_loop = 0;
1676                         avg_signal = 0;
1677                 } else {
1678                         avg_signal += rx_status->signal;
1679                         rx_status->signal = old_signal;
1680                 }
1681         }
1682
1683 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
1684 #define ESP_RSSI_MIN_RSSI (-90)
1685 #define ESP_RSSI_MAX_RSSI (-45)
1686         rx_status->noise = 0;  /* TBD */
1687         rx_status->qual = (mac_ctrl->rssi - ESP_RSSI_MIN_RSSI)* 100/(ESP_RSSI_MAX_RSSI - ESP_RSSI_MIN_RSSI);
1688         rx_status->qual = min(rx_status->qual, 100);
1689         rx_status->qual = max(rx_status->qual, 0);
1690 #undef ESP_RSSI_MAX_RSSI
1691 #undef ESP_RSSI_MIN_RSSI
1692 #endif /* !NEW_KERNEL && KERNEL_35*/
1693         rx_status->antenna = 0;  /* one antenna for now */
1694         rx_status->band = IEEE80211_BAND_2GHZ;
1695         rx_status->flag = RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
1696         if (mac_ctrl->sig_mode) {
1697             // 2.6.27 has RX_FLAG_RADIOTAP in enum mac80211_rx_flags in include/net/mac80211.h
1698 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))                
1699                 rx_status->flag |= RX_FLAG_HT;
1700                 rx_status->rate_idx = mac_ctrl->MCS;
1701                 if(mac_ctrl->SGI)
1702                         rx_status->flag |= RX_FLAG_SHORT_GI;
1703 #else
1704                 rx_status->rate_idx = esp_wmac_rate2idx(0xc);//ESP_RATE_54
1705 #endif
1706         } else {
1707                 rx_status->rate_idx = esp_wmac_rate2idx(mac_ctrl->rate);
1708         }
1709         if (mac_ctrl->rxend_state == RX_FCS_ERR)
1710                 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1711
1712         /* Mic error frame flag */
1713         if (mac_ctrl->rxend_state == RX_TKIPMIC_ERR || mac_ctrl->rxend_state == RX_CCMPMIC_ERR){
1714                 if(atomic_read(&sip->epub->wl.tkip_key_set) == 1){
1715                         rx_status->flag|= RX_FLAG_MMIC_ERROR;
1716                         atomic_set(&sip->epub->wl.tkip_key_set, 0);
1717                         printk("mic err\n");
1718                 } else {
1719                         printk("mic err discard\n");
1720                 }
1721         }
1722
1723         //esp_dbg(ESP_DBG_LOG, "%s freq: %u; signal: %d;  rate_idx %d; flag: %d \n", __func__, rx_status->freq, rx_status->signal, rx_status->rate_idx, rx_status->flag);
1724
1725         do {
1726                 struct ieee80211_hdr * wh = (struct ieee80211_hdr *)((u8 *)skb->data);
1727
1728                 if (ieee80211_is_beacon(wh->frame_control) || ieee80211_is_probe_resp(wh->frame_control)) {
1729                         struct ieee80211_mgmt * mgmt = (struct ieee80211_mgmt *)((u8 *)skb->data);
1730                         u8 *start = NULL;
1731                         size_t baselen, len = skb->len;
1732                         int inconsistency = 0;
1733
1734                         if (ieee80211_is_beacon(wh->frame_control)) {
1735                                 start = mgmt->u.beacon.variable;
1736                                 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
1737                                 bcn_counter++;
1738                         } else {
1739                                 start = mgmt->u.probe_resp.variable;
1740                                 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
1741                                 probe_rsp_counter++;
1742                         }
1743
1744                         if (baselen > len)
1745                                 return -1;
1746
1747                         inconsistency = sip_channel_value_inconsistency(start, len-baselen, mac_ctrl->channel);
1748
1749                         if (inconsistency) {
1750                                 return -1;
1751                         }
1752                 }
1753
1754 #ifdef KERNEL_IV_WAR
1755                 /* some kernels, e.g. 3.0.8, wrongly handle non-encrypted pkts such as EAPOL */
1756                 if (ieee80211_is_data(wh->frame_control)) {
1757                         if( !ieee80211_has_protected(wh->frame_control)) {
1758                                 esp_sip_dbg(ESP_DBG_TRACE, "%s kiv_war, add iv_stripped flag \n", __func__);
1759                                 rx_status->flag |= RX_FLAG_IV_STRIPPED;
1760                         } else {
1761                                 if ((atomic_read(&sip->epub->wl.ptk_cnt) == 0 && !(wh->addr1[0] & 0x1)) || 
1762                                         (atomic_read(&sip->epub->wl.gtk_cnt) == 0 && (wh->addr1[0] & 0x1))) 
1763                                 {
1764                                         esp_dbg(ESP_DBG_TRACE, "%s ==kiv_war, got bogus enc pkt==\n", __func__);
1765                                         rx_status->flag |= RX_FLAG_IV_STRIPPED;
1766                                         //show_buf(skb->data, 32);
1767                                 }
1768
1769                                 esp_sip_dbg(ESP_DBG_TRACE, "%s kiv_war, got enc pkt \n", __func__);
1770                         }
1771                 }
1772 #endif /* KERNEL_IV_WAR*/
1773         } while (0);
1774
1775         return 0;
1776 }
1777
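/*
 * Strip the sip_hdr and esp_mac_rx_ctrl headers from the front of an incoming
 * SIP data frame.  For non-aggregated frames it also reports the 802.11 frame
 * length minus FCS (*pkt_len_enc) and the remaining buffer length (*buf_len).
 * *pulled_len is advanced by however many bytes were consumed here.
 */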
1778 static struct esp_mac_rx_ctrl *sip_parse_normal_mac_ctrl(struct sk_buff *skb, int * pkt_len_enc, int *buf_len, int *pulled_len) 
1779 {
1780         struct esp_mac_rx_ctrl *mac_ctrl = NULL;
1781         struct sip_hdr *hdr;
1782         int len_in_hdr;
1783         ESSERT(skb != NULL);
1784         ESSERT(skb->len > SIP_MIN_DATA_PKT_LEN);
1785         hdr = (struct sip_hdr *)skb->data;
1786         len_in_hdr = hdr->len;
1787         skb_pull(skb, sizeof(struct sip_hdr));
1788         *pulled_len += sizeof(struct sip_hdr);
1789         mac_ctrl = (struct esp_mac_rx_ctrl *)skb->data;
1790         if(!mac_ctrl->Aggregation) {
1791                 ESSERT(pkt_len_enc != NULL);
1792                 ESSERT(buf_len != NULL);
1793                 *pkt_len_enc = (mac_ctrl->sig_mode?mac_ctrl->HT_length:mac_ctrl->legacy_length) - FCS_LEN;
1794                 *buf_len = len_in_hdr - sizeof(struct sip_hdr) - sizeof(struct esp_mac_rx_ctrl);
1795         }
1796         skb_pull(skb, sizeof(struct esp_mac_rx_ctrl));
1797         *pulled_len += sizeof(struct esp_mac_rx_ctrl);
1798
1799         return mac_ctrl;
1800 }
1801
1802 /*
1803  * for one MPDU (including subframe in AMPDU)
1804  *
1805  */
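/*
 * Copy one MPDU out of the shared RX buffer into a freshly allocated skb,
 * zero-pad it up to the expected (encrypted) length, fill in the mac80211
 * rx_status via sip_parse_mac_rx_info(), and advance the source skb past the
 * consumed bytes.  Returns the new skb, or NULL if allocation fails or the
 * frame is to be dropped.
 */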
1806 static struct sk_buff * sip_parse_data_rx_info(struct esp_sip *sip, struct sk_buff *skb, int pkt_len_enc, int buf_len, struct esp_mac_rx_ctrl *mac_ctrl, int *pulled_len) {
1807         /*
1808          *   | mac_rx_ctrl | real_data_payload | ampdu_entries |
1809          */
1810         //without enc
1811         int pkt_len = 0;
1812         struct sk_buff *rskb = NULL;
1813         int ret;
1814
1815         if (mac_ctrl->Aggregation) {
1816                 struct ieee80211_hdr * wh = (struct ieee80211_hdr *)skb->data;
1817                 pkt_len = pkt_len_enc;
1818                 if (ieee80211_has_protected(wh->frame_control))//ampdu, it is CCMP enc
1819                         pkt_len -= 8;
1820                 buf_len = roundup(pkt_len, 4);
1821         } else
1822                 pkt_len  = buf_len - 3 + ((pkt_len_enc - 1) & 0x3);
1823         esp_dbg(ESP_DBG_TRACE, "%s pkt_len %u, pkt_len_enc %u!, delta %d \n", __func__, pkt_len, pkt_len_enc, pkt_len_enc - pkt_len);
1824         do {
1825 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
1826                 rskb = __dev_alloc_skb(pkt_len_enc + 2, GFP_ATOMIC);
1827 #else
1828                 rskb = __dev_alloc_skb(pkt_len_enc, GFP_ATOMIC);
1829 #endif/* NEW_KERNEL */
1830                 if (unlikely(rskb == NULL)) {
1831                         esp_sip_dbg(ESP_DBG_ERROR, "%s no mem for rskb\n", __func__);
1832                         return NULL;
1833                 }
1834                 skb_put(rskb, pkt_len_enc);
1835         } while(0);
1836
1837         do {
1838 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
1839                 do {
1840                         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1841                         int hdrlen;
1842
1843                         hdrlen = ieee80211_hdrlen(hdr->frame_control);
1844                         if (unlikely(((unsigned long)(rskb->data + hdrlen)) & 3)) {
1845                                 skb_put(rskb, 2);
1846                                 skb_pull(rskb, 2);
1847                         }
1848                 } while(0);
1849 #endif /* < KERNEL_VERSION(2, 6, 39) */
1850                 memcpy(rskb->data, skb->data, pkt_len);
1851                 if (pkt_len_enc > pkt_len) {
1852                         memset(rskb->data + pkt_len, 0, pkt_len_enc - pkt_len);
1853                 }
1854                 /* strip out current pkt, move to the next one */
1855                 skb_pull(skb, buf_len);
1856                 *pulled_len += buf_len;
1857         } while (0);
1858
1859         ret = sip_parse_mac_rx_info(sip, mac_ctrl, rskb);
1860         if(ret == -1 && !mac_ctrl->Aggregation) {
1861                 kfree_skb(rskb);
1862                 return NULL;
1863         }
1864
1865         esp_dbg(ESP_DBG_LOG, "%s after pull headers, skb->len %d rskb->len %d \n", __func__, skb->len, rskb->len);
1866
1867         return rskb;
1868 }
1869
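/*
 * Allocate and initialise the SIP layer: the pools of pre-allocated control
 * tx/rx buffers (SIP_CTRL_BUF_N buffers of SIP_CTRL_BUF_SZ bytes, the first
 * SIP_CTRL_TXBUF_N of them reserved for TX), the RX queue and its worker,
 * and the initial state (SIP_INIT, zero tx credits, default noise floor).
 */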
1870 struct esp_sip * sip_attach(struct esp_pub *epub) 
1871 {
1872         struct esp_sip *sip = NULL;
1873         struct sip_pkt *pkt = NULL;
1874         int i;
1875
1876         sip = kzalloc(sizeof(struct esp_sip), GFP_KERNEL);
1877         if (sip == NULL)
1878                 return NULL;
1879
1880 #ifdef ESP_RX_COPYBACK_TEST
1881         /* alloc 64KB for rx test */
1882         copyback_buf = kzalloc(0x10000, GFP_KERNEL);
1883 #endif /* ESP_RX_COPYBACK_TEST */
1884
1885         spin_lock_init(&sip->lock);
1886
1887         INIT_LIST_HEAD(&sip->free_ctrl_txbuf);
1888         INIT_LIST_HEAD(&sip->free_ctrl_rxbuf);
1889
1890         for (i = 0; i < SIP_CTRL_BUF_N; i++) {
1891                 pkt = kzalloc(sizeof(struct sip_pkt), GFP_KERNEL);
1892
1893                 if (!pkt) break;
1894
1895                 pkt->buf_begin = kzalloc(SIP_CTRL_BUF_SZ, GFP_KERNEL);
1896
1897                 if (pkt->buf_begin == NULL) {
1898                         kfree(pkt);
1899                         break;
1900                 }
1901
1902                 pkt->buf_len = SIP_CTRL_BUF_SZ;
1903                 pkt->buf = pkt->buf_begin;
1904
1905                 if (i < SIP_CTRL_TXBUF_N) {
1906                         list_add_tail(&pkt->list, &sip->free_ctrl_txbuf);
1907                 } else {
1908                         list_add_tail(&pkt->list, &sip->free_ctrl_rxbuf);
1909                 }
1910         }
1911
1912         mutex_init(&sip->rx_mtx);
1913         skb_queue_head_init(&sip->rxq);
1914 #ifndef RX_SYNC
1915         INIT_WORK(&sip->rx_process_work, sip_rxq_process);
1916 #endif/* RX_SYNC */
1917
1918         sip->epub = epub;
1919         atomic_set(&sip->noise_floor, -96);
1920
1921         atomic_set(&sip->state, SIP_INIT);
1922         atomic_set(&sip->tx_credits, 0);
1923
1924         return sip;
1925 }
1926
1927 static void sip_free_init_ctrl_buf(struct esp_sip *sip)
1928 {
1929         struct sip_pkt *pkt, *tpkt;
1930
1931         list_for_each_entry_safe(pkt, tpkt,
1932                                  &sip->free_ctrl_txbuf, list) {
1933                 list_del(&pkt->list);
1934                 kfree(pkt->buf_begin);
1935                 kfree(pkt);
1936         }
1937
1938         list_for_each_entry_safe(pkt, tpkt,
1939                                  &sip->free_ctrl_rxbuf, list) {
1940                 list_del(&pkt->list);
1941                 kfree(pkt->buf_begin);
1942                 kfree(pkt);
1943         }
1944 }
1945
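/*
 * Tear the SIP layer down.  In the SIP_RUN state this disables target
 * interrupts and the SDIO irq, cancels the RX/TX workers, purges all queues,
 * unregisters the mac80211 hw and releases the TX aggregation buffer; in the
 * earlier boot states only the pieces that were already set up are undone.
 */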
1946 void sip_detach(struct esp_sip *sip)
1947 {
1948 #ifndef ESP_PREALLOC
1949         int po;
1950 #endif
1951         if (sip == NULL)
1952                 return ;
1953
1954         sip_free_init_ctrl_buf(sip);
1955
1956         if (atomic_read(&sip->state) == SIP_RUN) {
1957
1958                 sif_disable_target_interrupt(sip->epub);
1959
1960                 atomic_set(&sip->state, SIP_STOP);
1961
1962                 /* disable irq here */
1963                 sif_disable_irq(sip->epub);
1964 #ifndef RX_SYNC
1965                 cancel_work_sync(&sip->rx_process_work);
1966 #endif/* RX_SYNC */
1967
1968                 skb_queue_purge(&sip->rxq);
1969                 mutex_destroy(&sip->rx_mtx);
1970                 cancel_work_sync(&sip->epub->sendup_work);
1971                 skb_queue_purge(&sip->epub->rxq);
1972
1973 #ifdef ESP_NO_MAC80211
1974                 unregister_netdev(sip->epub->net_dev);
1975                 wiphy_unregister(sip->epub->wdev->wiphy);
1976 #else
1977                 if (test_and_clear_bit(ESP_WL_FLAG_HW_REGISTERED, &sip->epub->wl.flags)) {
1978                         ieee80211_unregister_hw(sip->epub->hw);
1979                 }
1980 #endif
1981
1982                 /* cancel all worker/timer */
1983                 cancel_work_sync(&sip->epub->tx_work);
1984                 skb_queue_purge(&sip->epub->txq);
1985                 skb_queue_purge(&sip->epub->txdoneq);
1986
1987 #ifdef ESP_PREALLOC
1988                 esp_put_tx_aggr_buf(&sip->tx_aggr_buf);
1989 #else
1990                 po = get_order(SIP_TX_AGGR_BUF_SIZE);
1991                 free_pages((unsigned long)sip->tx_aggr_buf, po);
1992                 sip->tx_aggr_buf = NULL;
1993 #endif
1994
1995                 atomic_set(&sip->state, SIP_INIT);
1996         } else if (atomic_read(&sip->state) >= SIP_BOOT && atomic_read(&sip->state) <= SIP_WAIT_BOOTUP) {
1997
1998                 sif_disable_target_interrupt(sip->epub);
1999                 atomic_set(&sip->state, SIP_STOP);
2000                 sif_disable_irq(sip->epub);
2001
2002                 if (sip->rawbuf)
2003                         kfree(sip->rawbuf);
2004
2005                 if (atomic_read(&sip->state) == SIP_SEND_INIT) {
2006 #ifndef RX_SYNC
2007                         cancel_work_sync(&sip->rx_process_work);
2008 #endif/* RX_SYNC */
2009                         skb_queue_purge(&sip->rxq);
2010                         mutex_destroy(&sip->rx_mtx);
2011                         cancel_work_sync(&sip->epub->sendup_work);
2012                         skb_queue_purge(&sip->epub->rxq);
2013                 }
2014
2015 #ifdef ESP_NO_MAC80211
2016                 unregister_netdev(sip->epub->net_dev);
2017                 wiphy_unregister(sip->epub->wdev->wiphy);
2018 #else
2019                 if (test_and_clear_bit(ESP_WL_FLAG_HW_REGISTERED, &sip->epub->wl.flags)) {
2020                         ieee80211_unregister_hw(sip->epub->hw);
2021                 }
2022 #endif
2023                 atomic_set(&sip->state, SIP_INIT);
2024         } else
2025                 esp_dbg(ESP_DBG_ERROR, "%s wrong state %d\n", __func__, atomic_read(&sip->state));
2026
2027         kfree(sip);
2028 }
2029
2030 int sip_prepare_boot(struct esp_sip *sip)
2031 {
2032         if (atomic_read(&sip->state) != SIP_INIT) {
2033                 esp_dbg(ESP_DBG_ERROR, "%s wrong state %d\n", __func__, atomic_read(&sip->state));
2034                 return -ENOTRECOVERABLE;
2035         }
2036
2037         if (sip->rawbuf == NULL) {
2038                 sip->rawbuf = kzalloc(SIP_BOOT_BUF_SIZE, GFP_KERNEL);
2039
2040                 if (sip->rawbuf == NULL)
2041                         return -ENOMEM;
2042         }
2043
2044         atomic_set(&sip->state, SIP_PREPARE_BOOT);
2045         
2046         return 0;
2047 }
2048
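/*
 * Stream a host buffer into target memory: the data is split into chunks that
 * fit SIP_BOOT_BUF_SIZE, each chunk is wrapped in a SIP_CMD_WRITE_MEMORY
 * control frame (sip_hdr + sip_cmd_write_memory + payload, padded to 4 bytes)
 * and written synchronously over the bus.  Used while booting the target.
 */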
2049 int sip_write_memory(struct esp_sip *sip, u32 addr, u8 *buf, u16 len)
2050 {
2051         struct sip_cmd_write_memory *cmd;
2052         struct sip_hdr *chdr;
2053         u16 remains, hdrs, bufsize;
2054         u32 loadaddr;
2055         u8 *src;
2056         int err = 0;
2057         u32 *t = NULL;
2058
2059         if (sip == NULL || sip->rawbuf == NULL) {
2060                 ESSERT(sip != NULL);
2061                 ESSERT(sip->rawbuf != NULL);
2062                 return -EINVAL;
2063         }
2064
2065         memset(sip->rawbuf, 0, SIP_BOOT_BUF_SIZE);
2066
2067         chdr = (struct sip_hdr *)sip->rawbuf;
2068         SIP_HDR_SET_TYPE(chdr->fc[0], SIP_CTRL);
2069         chdr->c_cmdid = SIP_CMD_WRITE_MEMORY;
2070
2071         remains = len;
2072         hdrs = sizeof(struct sip_hdr) + sizeof(struct sip_cmd_write_memory);
2073
2074         while (remains) {
2075                 src = &buf[len - remains];
2076                 loadaddr = addr + (len - remains);
2077
2078                 if (remains < (SIP_BOOT_BUF_SIZE - hdrs)) {
2079                         /* aligned with 4 bytes */
2080                         bufsize = roundup(remains, 4);
2081                         memset(sip->rawbuf + hdrs, 0, bufsize);
2082                         remains = 0;
2083                 } else {
2084                         bufsize = SIP_BOOT_BUF_SIZE - hdrs;
2085                         remains -=  bufsize;
2086                 }
2087
2088                 chdr->len = bufsize + hdrs;
2089                 chdr->seq = sip->txseq++;
2090                 cmd = (struct sip_cmd_write_memory *)(sip->rawbuf + SIP_CTRL_HDR_LEN);
2091                 cmd->len = bufsize;
2092                 cmd->addr = loadaddr;
2093                 memcpy(sip->rawbuf+hdrs, src, bufsize);
2094
2095                 t = (u32 *)sip->rawbuf;
2096                 esp_dbg(ESP_DBG_TRACE, "%s t0: 0x%08x t1: 0x%08x t2:0x%08x loadaddr 0x%08x \n", __func__, t[0], t[1], t[2], loadaddr);
2097
2098                 err = esp_common_write(sip->epub, sip->rawbuf, chdr->len, ESP_SIF_SYNC);
2099
2100                 if (err) {
2101                         esp_dbg(ESP_DBG_ERROR, "%s send buffer failed\n", __func__);
2102                         return err;
2103                 }
2104
2105                 // 1ms is enough; in fact, on a Dell D430 no delay is needed at all.
2106                 mdelay(1);
2107
2108                 sip_dec_credit(sip);
2109         }
2110
2111         return err;
2112 }
2113
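/*
 * Send one control command synchronously using a buffer from the control TX
 * pool.  The buffer is reclaimed immediately after the write; as a special
 * case, SIP_CMD_BOOTUP resets the tx/rx sequence counters because the target
 * RAM code restarts its own numbering.
 */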
2114 int sip_send_cmd(struct esp_sip *sip, int cid, u32 cmdlen, void *cmd)
2115 {
2116         struct sip_hdr *chdr;
2117         struct sip_pkt *pkt = NULL;
2118         int ret = 0;
2119
2120         pkt = sip_get_ctrl_buf(sip, SIP_TX_CTRL_BUF);
2121
2122         if (pkt == NULL)
2123                 return -ENOMEM;
2124
2125         chdr = (struct sip_hdr *)pkt->buf_begin;
2126         chdr->len = SIP_CTRL_HDR_LEN + cmdlen;
2127         chdr->seq = sip->txseq++;
2128         chdr->c_cmdid = cid;
2129         
2130
2131         if (cmd) {
2132                 memset(pkt->buf, 0, cmdlen);
2133                 memcpy(pkt->buf, (u8 *)cmd, cmdlen);
2134         }
2135
2136         esp_dbg(ESP_DBG_TRACE, "cid %d, len %u, seq %u \n", chdr->c_cmdid, chdr->len, chdr->seq);
2137
2138         esp_dbg(ESP_DBG_TRACE, "c1 0x%08x   c2 0x%08x\n", *(u32 *)&pkt->buf[0], *(u32 *)&pkt->buf[4]);
2139
2140         ret = esp_common_write(sip->epub, pkt->buf_begin, chdr->len, ESP_SIF_SYNC);
2141
2142         if (ret)
2143                 esp_dbg(ESP_DBG_ERROR, "%s send cmd %d failed \n", __func__, cid);
2144
2145         sip_dec_credit(sip);
2146
2147         sip_reclaim_ctrl_buf(sip, pkt, SIP_TX_CTRL_BUF);
2148
2149         /*
2150          *  Hack here: reset tx/rx seq before target ram code is up...
2151          */
2152         if (cid == SIP_CMD_BOOTUP) {
2153                 sip->rxseq = 0;
2154                 sip->txseq = 0;
2155                 sip->txdataseq = 0;
2156         }
2157
2158         return ret;
2159 }
2160
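/*
 * Allocate an skb carrying a SIP control command (a sip_hdr followed by the
 * command body).  The ieee80211 tx_info flags are set to 0xffffffff so that
 * sip_txq_process() can tell control packets apart from data frames.
 *
 * Illustrative usage only (the exact length/offset conventions depend on the
 * caller; see the FPGA_TXDATA helper near the end of this file):
 *
 *      skb = sip_alloc_ctrl_skbuf(sip, len, SIP_CMD_BOOTUP);
 *      if (skb) {
 *              ... fill in the command body behind the sip_hdr ...
 *              sip_cmd_enqueue(sip, skb);
 *      }
 */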
2161 struct sk_buff *
2162 sip_alloc_ctrl_skbuf(struct esp_sip *sip, u16 len, u32 cid) {
2163         struct sip_hdr *si = NULL;
2164         struct ieee80211_tx_info *ti = NULL;
2165         struct sk_buff *skb = NULL;
2166
2167         ESSERT(len <= sip->tx_blksz);
2168
2169         /* no need to reserve space for net stack */
2170         skb = __dev_alloc_skb(len, GFP_KERNEL);
2171
2172         if (skb == NULL) {
2173                 esp_dbg(ESP_DBG_ERROR, "no skb for ctrl !\n");
2174                 return NULL;
2175         }
2176
2177         skb->len = len;
2178
2179         ti = IEEE80211_SKB_CB(skb);
2180         /* set tx_info flags to 0xffffffff to indicate sip_ctrl pkt */
2181         ti->flags = 0xffffffff;
2182         si = (struct sip_hdr *)skb->data;
2183         memset(si, 0, sizeof(struct sip_hdr));
2184         SIP_HDR_SET_TYPE(si->fc[0], SIP_CTRL);
2185         si->len = len;
2186         si->c_cmdid = cid;
2187
2188         return skb;
2189 }
2190
2191 void
2192 sip_free_ctrl_skbuff(struct esp_sip *sip, struct sk_buff *skb)
2193 {
2194         memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
2195         kfree_skb(skb);
2196 }
2197
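/*
 * Take a buffer from (or, in sip_reclaim_ctrl_buf below, return it to) the
 * free control-buffer lists.  Both operations are protected by sip->lock;
 * TX buffers come back with the sip_hdr type set and pkt->buf pointing just
 * past the header.
 */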
2198 static struct sip_pkt *
2199 sip_get_ctrl_buf(struct esp_sip *sip, SIP_BUF_TYPE bftype) {
2200         struct sip_pkt *pkt = NULL;
2201         struct list_head *bflist;
2202         struct sip_hdr *chdr;
2203
2204         bflist = (bftype == SIP_TX_CTRL_BUF) ? &sip->free_ctrl_txbuf :&sip->free_ctrl_rxbuf;
2205
2206         spin_lock_bh(&sip->lock);
2207
2208         if (list_empty(bflist)) {
2209                 spin_unlock_bh(&sip->lock);
2210                 return NULL;
2211         }
2212
2213         pkt = list_first_entry(bflist, struct sip_pkt, list);
2214         list_del(&pkt->list);
2215         spin_unlock_bh(&sip->lock);
2216
2217         if (bftype == SIP_TX_CTRL_BUF) {
2218                 chdr = (struct sip_hdr *)pkt->buf_begin;
2219                 SIP_HDR_SET_TYPE(chdr->fc[0], SIP_CTRL);
2220                 pkt->buf = pkt->buf_begin + SIP_CTRL_HDR_LEN;
2221         } else {
2222                 pkt->buf = pkt->buf_begin;
2223         }
2224
2225         return pkt;
2226 }
2227
2228 static void
2229 sip_reclaim_ctrl_buf(struct esp_sip *sip, struct sip_pkt *pkt, SIP_BUF_TYPE bftype)
2230 {
2231         struct list_head *bflist = NULL;
2232
2233         if (bftype == SIP_TX_CTRL_BUF)
2234                 bflist = &sip->free_ctrl_txbuf;
2235         else if (bftype == SIP_RX_CTRL_BUF)
2236                 bflist = &sip->free_ctrl_rxbuf;
2237         else return;
2238
2239         pkt->buf = pkt->buf_begin;
2240         pkt->payload_len = 0;
2241
2242         spin_lock_bh(&sip->lock);
2243         list_add_tail(&pkt->list, bflist);
2244         spin_unlock_bh(&sip->lock);
2245 }
2246
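/*
 * Wait (up to 2s) on gl_bootup_cplx for the target's bootup event.  On
 * success the mac80211 hw is registered (unless running in ATE mode), the
 * optional test-mode netlink interface is set up, and the SIP state machine
 * moves to SIP_RUN.
 */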
2247 int
2248 sip_poll_bootup_event(struct esp_sip *sip)
2249 {
2250         int ret = 0;
2251
2252         esp_dbg(ESP_DBG_TRACE, "polling bootup event... \n");
2253
2254         if (gl_bootup_cplx)
2255                 ret = wait_for_completion_timeout(gl_bootup_cplx, 2 * HZ);
2256
2257         esp_dbg(ESP_DBG_TRACE, "******time remain****** = [%d]\n", ret);
2258         if (ret <= 0) {
2259                 esp_dbg(ESP_DBG_ERROR, "bootup event timeout\n");
2260                 return -ETIMEDOUT;
2261         }       
2262
2263         if (sif_get_ate_config() == 0) {
2264                 ret = esp_register_mac80211(sip->epub);
2265         }
2266
2267 #ifdef TEST_MODE
2268         ret = test_init_netlink(sip);
2269         if (ret < 0) {
2270                 esp_sip_dbg(ESP_DBG_TRACE, "esp_sdio: failed initializing netlink\n");
2271                 return ret;
2272         }
2273 #endif
2274         
2275         atomic_set(&sip->state, SIP_RUN);
2276         esp_dbg(ESP_DBG_TRACE, "target booted up\n");
2277
2278         return ret;
2279 }
2280
2281 int
2282 sip_poll_resetting_event(struct esp_sip *sip)
2283 {
2284         int ret = 0;
2285
2286         esp_dbg(ESP_DBG_TRACE, "polling resetting event... \n");
2287
2288         if (gl_bootup_cplx)
2289                 ret = wait_for_completion_timeout(gl_bootup_cplx, 10 * HZ);
2290
2291         esp_dbg(ESP_DBG_TRACE, "******time remain****** = [%d]\n", ret);
2292         if (ret <= 0) {
2293                 esp_dbg(ESP_DBG_ERROR, "resetting event timeout\n");
2294                 return -ETIMEDOUT;
2295         }       
2296       
2297         esp_dbg(ESP_DBG_TRACE, "target resetting %d %p\n", ret, gl_bootup_cplx);
2298
2299         return 0;
2300 }
2301
2302
2303 #ifdef FPGA_DEBUG
2304
2305 /* bogus bootup cmd for FPGA debugging */
2306 int
2307 sip_send_bootup(struct esp_sip *sip)
2308 {
2309         int ret;
2310         struct sip_cmd_bootup bootcmd;
2311
2312         esp_dbg(ESP_DBG_LOG, "sending bootup\n");
2313
2314         bootcmd.boot_addr = 0;
2315         ret = sip_send_cmd(sip, SIP_CMD_BOOTUP, sizeof(struct sip_cmd_bootup), &bootcmd);
2316
2317         return ret;
2318 }
2319
2320 #endif /* FPGA_DEBUG */
2321
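/*
 * TX queue flow control with hysteresis, based on how many data packets are
 * queued and how many tx credits the target has granted: the mac80211 queues
 * are stopped once the backlog reaches SIP_STOP_QUEUE_THRESHOLD (or three
 * quarters of it when fewer than 8 credits remain), and may be resumed once
 * it drops below SIP_RESUME_QUEUE_THRESHOLD (or twice that while at least 16
 * credits are available) and the stop flag has been cleared.
 */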
2322 bool
2323 sip_queue_need_stop(struct esp_sip *sip)
2324 {
2325         return atomic_read(&sip->tx_data_pkt_queued) >= SIP_STOP_QUEUE_THRESHOLD
2326                 || (atomic_read(&sip->tx_credits) < 8
2327                 && atomic_read(&sip->tx_data_pkt_queued) >= SIP_STOP_QUEUE_THRESHOLD / 4 * 3);
2328 }
2329
2330 bool
2331 sip_queue_may_resume(struct esp_sip *sip)
2332 {
2333         return atomic_read(&sip->epub->txq_stopped)
2334                 && !test_bit(ESP_WL_FLAG_STOP_TXQ, &sip->epub->wl.flags)
2335                 && ((atomic_read(&sip->tx_credits) >= 16
2336                 && atomic_read(&sip->tx_data_pkt_queued) < SIP_RESUME_QUEUE_THRESHOLD * 2)
2337                 || atomic_read(&sip->tx_data_pkt_queued) < SIP_RESUME_QUEUE_THRESHOLD);
2338 }
2339
2340 #ifndef FAST_TX_STATUS
2341 bool
2342 sip_tx_data_need_stop(struct esp_sip *sip)
2343 {
2344         return atomic_read(&sip->pending_tx_status) >= SIP_PENDING_STOP_TX_THRESHOLD;
2345 }
2346
2347 bool
2348 sip_tx_data_may_resume(struct esp_sip *sip)
2349 {
2350         return atomic_read(&sip->pending_tx_status) < SIP_PENDING_RESUME_TX_THRESHOLD;
2351 }
2352 #endif /* FAST_TX_STATUS */
2353
2354 int
2355 sip_cmd_enqueue(struct esp_sip *sip, struct sk_buff *skb)
2356 {
2357         if (!sip || !sip->epub) {
2358                 esp_dbg(ESP_DBG_ERROR, "func %s, sip or sip->epub is NULL\n", __func__);
2359                 return -EINVAL;
2360         }
2361         
2362         if (!skb) {
2363                 esp_dbg(ESP_DBG_ERROR, "func %s, skb is NULL\n", __func__);
2364                 return -EINVAL;
2365         }
2366
2367         skb_queue_tail(&sip->epub->txq, skb);
2368
2369 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
2370         if (sif_get_ate_config() == 0) {
2371                 ieee80211_queue_work(sip->epub->hw, &sip->epub->tx_work);
2372         } else {
2373                 queue_work(sip->epub->esp_wkq, &sip->epub->tx_work);
2374         }
2375 #else       
2376         queue_work(sip->epub->esp_wkq, &sip->epub->tx_work);
2377 #endif
2378         return 0;
2379 }
2380
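/*
 * Queue one data frame for transmission: the skb goes onto epub->txq, the
 * queued-packet counter is bumped, and when the high-water mark is reached
 * the mac80211 queues are stopped (to be resumed later, once
 * sip_queue_may_resume() holds).
 */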
2381 void sip_tx_data_pkt_enqueue(struct esp_pub *epub, struct sk_buff *skb)
2382 {
2383         if(!epub || !epub->sip) {
2384                 if (!epub)
2385                         esp_dbg(ESP_DBG_ERROR, "func %s, epub is NULL\n", __func__);
2386                 else
2387                         esp_dbg(ESP_DBG_ERROR, "func %s, epub->sip is NULL\n", __func__);
2388
2389                 return;
2390         }
2391         if (!skb) {
2392                 esp_dbg(ESP_DBG_ERROR, "func %s, skb is NULL\n", __func__);
2393                 return;
2394         }
2395         skb_queue_tail(&epub->txq, skb);
2396         atomic_inc(&epub->sip->tx_data_pkt_queued);
2397         if(sip_queue_need_stop(epub->sip)){
2398                 if (epub->hw) {
2399                         ieee80211_stop_queues(epub->hw);
2400                         atomic_set(&epub->txq_stopped, true);
2401                 }
2402
2403         }
2404 }
2405
2406 #ifdef FPGA_TXDATA
2407 int sip_send_tx_data(struct esp_sip *sip, bool assoc, u8 *bssid)
2408 {
2409         struct sk_buff *skb = NULL;
2410         struct sip_cmd_bss_info_update *bsscmd;
2411
2412         skb = sip_alloc_ctrl_skbuf(sip, sizeof(struct sip_cmd_bss_info_update), SIP_CMD_BSS_INFO_UPDATE);
2413         if (!skb)
2414                 return -EINVAL;
2415
2416         bsscmd = (struct sip_cmd_bss_info_update *)(skb->data + sizeof(struct sip_tx_info));
2417         bsscmd->isassoc = assoc ? 1 : 0;
2418         memcpy(bsscmd->bssid, bssid, ETH_ALEN);
2419         STRACE_SHOW(sip);
2420         return sip_cmd_enqueue(sip, skb);
2421 }
2422 #endif /* FPGA_TXDATA */
2423
2424 #ifdef SIP_DEBUG
2425 void sip_dump_pending_data(struct esp_pub *epub)
2426 {
2427 #if 0
2428         struct sk_buff *tskb, *tmp;
2429
2430         skb_queue_walk_safe(&epub->txdoneq, tskb, tmp) {
2431                 show_buf(tskb->data, 32);
2432         }
2433 #endif //0000
2434 }
2435 #else
2436 void sip_dump_pending_data(struct esp_pub *epub)
2437 {}
2438 #endif /* SIP_DEBUG */
2439