Merge tag 'lsk-v4.4-16.06-android'
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / rockchip_wlan / rkwifi / bcmdhd / dhd_linux.c
1 /*
2  * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3  * Basically selected code segments from usb-cdc.c and usb-rndis.c
4  *
5  * Copyright (C) 1999-2016, Broadcom Corporation
6  * 
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  * 
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  * 
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
29  */
30
31 #include <typedefs.h>
32 #include <linuxver.h>
33 #include <osl.h>
34 #ifdef SHOW_LOGTRACE
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
38
39
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/inetdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/etherdevice.h>
48 #include <linux/random.h>
49 #include <linux/spinlock.h>
50 #include <linux/ethtool.h>
51 #include <linux/fcntl.h>
52 #include <linux/fs.h>
53 #include <linux/ip.h>
54 #include <linux/reboot.h>
55 #include <linux/notifier.h>
56 #include <net/addrconf.h>
57 #ifdef ENABLE_ADAPTIVE_SCHED
58 #include <linux/cpufreq.h>
59 #endif /* ENABLE_ADAPTIVE_SCHED */
60
61 #include <asm/uaccess.h>
62 #include <asm/unaligned.h>
63
64 #include <epivers.h>
65 #include <bcmutils.h>
66 #include <bcmendian.h>
67 #include <bcmdevs.h>
68
69 #include <proto/ethernet.h>
70 #include <proto/bcmevent.h>
71 #include <proto/vlan.h>
72 #include <proto/802.3.h>
73
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
76 #include <dhd.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
80 #endif
81 #include <dhd_bus.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
84 #include <dhd_dbg.h>
85 #ifdef CONFIG_HAS_WAKELOCK
86 #include <linux/wakelock.h>
87 #endif
88 #ifdef WL_CFG80211
89 #include <wl_cfg80211.h>
90 #endif
91 #ifdef PNO_SUPPORT
92 #include <dhd_pno.h>
93 #endif
94 #ifdef RTT_SUPPORT
95 #include <dhd_rtt.h>
96 #endif
97
98 #ifdef CONFIG_COMPAT
99 #include <linux/compat.h>
100 #endif
101
102 #ifdef DHD_WMF
103 #include <dhd_wmf_linux.h>
104 #endif /* DHD_WMF */
105
106 #ifdef DHD_L2_FILTER
107 #include <proto/bcmicmp.h>
108 #include <bcm_l2_filter.h>
109 #include <dhd_l2_filter.h>
110 #endif /* DHD_L2_FILTER */
111
112 #ifdef DHD_PSTA
113 #include <dhd_psta.h>
114 #endif /* DHD_PSTA */
115
116
117 #ifdef DHDTCPACK_SUPPRESS
118 #include <dhd_ip.h>
119 #endif /* DHDTCPACK_SUPPRESS */
120
121 #ifdef DHD_DEBUG_PAGEALLOC
122 typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
123 void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
124 extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
125 #endif /* DHD_DEBUG_PAGEALLOC */
126
127
128 #if defined(DHD_LB)
129 /* Dynamic CPU selection for load balancing */
130 #include <linux/cpu.h>
131 #include <linux/cpumask.h>
132 #include <linux/notifier.h>
133 #include <linux/workqueue.h>
134 #include <asm/atomic.h>
135
136 #if !defined(DHD_LB_PRIMARY_CPUS)
137 #define DHD_LB_PRIMARY_CPUS     0x0 /* Big CPU coreids mask */
138 #endif
139
140 #if !defined(DHD_LB_SECONDARY_CPUS)
141 #define DHD_LB_SECONDARY_CPUS   0xFE /* Little CPU coreids mask */
142 #endif
143
144 #define HIST_BIN_SIZE   8
145
146 #if defined(DHD_LB_RXP)
147 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
148 #endif /* DHD_LB_RXP */
149
150 #endif /* DHD_LB */
151
152 #ifdef WLMEDIA_HTSF
153 #include <linux/time.h>
154 #include <htsf.h>
155
156 #define HTSF_MINLEN 200    /* min. packet length to timestamp */
157 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us  */
158 #define TSMAX  1000        /* max no. of timing record kept   */
159 #define NUMBIN 34
160
161 static uint32 tsidx = 0;
162 static uint32 htsf_seqnum = 0;
163 uint32 tsfsync;
164 struct timeval tsync;
165 static uint32 tsport = 5010;
166
167 typedef struct histo_ {
168         uint32 bin[NUMBIN];
169 } histo_t;
170
171 #if !ISPOWEROF2(DHD_SDALIGN)
172 #error DHD_SDALIGN is not a power of 2!
173 #endif
174
175 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
176 #endif /* WLMEDIA_HTSF */
177
178 #ifdef STBLINUX
179 #ifdef quote_str
180 #undef quote_str
181 #endif /* quote_str */
182 #ifdef to_str
183 #undef to_str
184 #endif /* quote_str */
185 #define to_str(s) #s
186 #define quote_str(s) to_str(s)
187
188 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
189 #endif /* STBLINUX */
190
191
192 #if defined(SOFTAP)
193 extern bool ap_cfg_running;
194 extern bool ap_fw_loaded;
195 #endif
196 extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
197
198 #ifdef FIX_CPU_MIN_CLOCK
199 #include <linux/pm_qos.h>
200 #endif /* FIX_CPU_MIN_CLOCK */
201 #ifdef SET_RANDOM_MAC_SOFTAP
202 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
203 #define CONFIG_DHD_SET_RANDOM_MAC_VAL   0x001A11
204 #endif
205 static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
206 #endif /* SET_RANDOM_MAC_SOFTAP */
207 #ifdef ENABLE_ADAPTIVE_SCHED
208 #define DEFAULT_CPUFREQ_THRESH          1000000 /* threshold frequency : 1000000 = 1GHz */
209 #ifndef CUSTOM_CPUFREQ_THRESH
210 #define CUSTOM_CPUFREQ_THRESH   DEFAULT_CPUFREQ_THRESH
211 #endif /* CUSTOM_CPUFREQ_THRESH */
212 #endif /* ENABLE_ADAPTIVE_SCHED */
213
214 /* enable HOSTIP cache update from the host side when an eth0:N is up */
215 #define AOE_IP_ALIAS_SUPPORT 1
216
217 #ifdef BCM_FD_AGGR
218 #include <bcm_rpc.h>
219 #include <bcm_rpc_tp.h>
220 #endif
221 #ifdef PROP_TXSTATUS
222 #include <wlfc_proto.h>
223 #include <dhd_wlfc.h>
224 #endif
225
226 #include <wl_android.h>
227
228 /* Maximum STA per radio */
229 #define DHD_MAX_STA     32
230
231
232
233 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
234 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
235 #define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
236
237 #ifdef ARP_OFFLOAD_SUPPORT
238 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
239 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
240         unsigned long event, void *ptr);
241 static struct notifier_block dhd_inetaddr_notifier = {
242         .notifier_call = dhd_inetaddr_notifier_call
243 };
244 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
245  * created in kernel notifier link list (with 'next' pointing to itself)
246  */
247 static bool dhd_inetaddr_notifier_registered = FALSE;
248 #endif /* ARP_OFFLOAD_SUPPORT */
249
250 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
251 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
252         unsigned long event, void *ptr);
253 static struct notifier_block dhd_inet6addr_notifier = {
254         .notifier_call = dhd_inet6addr_notifier_call
255 };
256 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
257  * created in kernel notifier link list (with 'next' pointing to itself)
258  */
259 static bool dhd_inet6addr_notifier_registered = FALSE;
260 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
261
262 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
263 #include <linux/suspend.h>
264 volatile bool dhd_mmc_suspend = FALSE;
265 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
266 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
267
268 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
269 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
270 #endif 
271 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
272 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
273 #endif 
274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
275 MODULE_LICENSE("GPL and additional rights");
276 #endif /* LinuxVer */
277
278 #include <dhd_bus.h>
279
280 #ifdef BCM_FD_AGGR
281 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
282 #else
283 #ifndef PROP_TXSTATUS
284 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
285 #else
286 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
287 #endif
288 #endif /* BCM_FD_AGGR */
289
290 #ifdef PROP_TXSTATUS
291 extern bool dhd_wlfc_skip_fc(void);
292 extern void dhd_wlfc_plat_init(void *dhd);
293 extern void dhd_wlfc_plat_deinit(void *dhd);
294 #endif /* PROP_TXSTATUS */
295 #ifdef USE_DYNAMIC_F2_BLKSIZE
296 extern uint sd_f2_blocksize;
297 extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
298 #endif /* USE_DYNAMIC_F2_BLKSIZE */
299
300 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Compatibility stub: the 2.6.15 kernel does not export print_tainted(),
 * which some shared debug macros reference.  Return an empty string so
 * log formatting still works.
 *
 * Fix: declare the parameter list as (void) — an empty () in C means
 * "unspecified parameters", not "no parameters", and defeats type checking.
 */
const char *
print_tainted(void)
{
	return "";
}
306 #endif  /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
307
308 /* Linux wireless extension support */
309 #if defined(WL_WIRELESS_EXT)
310 #include <wl_iw.h>
311 extern wl_iw_extra_params_t  g_wl_iw_params;
312 #endif /* defined(WL_WIRELESS_EXT) */
313
314 #ifdef CONFIG_PARTIALSUSPEND_SLP
315 #include <linux/partialsuspend_slp.h>
316 #define CONFIG_HAS_EARLYSUSPEND
317 #define DHD_USE_EARLYSUSPEND
318 #define register_early_suspend          register_pre_suspend
319 #define unregister_early_suspend        unregister_pre_suspend
320 #define early_suspend                           pre_suspend
321 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN                50
322 #else
323 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
324 #include <linux/earlysuspend.h>
325 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
326 #endif /* CONFIG_PARTIALSUSPEND_SLP */
327
328 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
329
330 #ifdef PKT_FILTER_SUPPORT
331 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
332 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
333 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
334 #endif
335
336
337 #ifdef READ_MACADDR
338 extern int dhd_read_macaddr(struct dhd_info *dhd);
339 #else
340 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
341 #endif
342 #ifdef WRITE_MACADDR
343 extern int dhd_write_macaddr(struct ether_addr *mac);
344 #else
345 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
346 #endif
347
348
349
350
351
352 #ifdef DHD_FW_COREDUMP
353 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
354 #endif /* DHD_FW_COREDUMP */
355 #ifdef DHD_LOG_DUMP
356 static void dhd_log_dump_init(dhd_pub_t *dhd);
357 static void dhd_log_dump_deinit(dhd_pub_t *dhd);
358 static void dhd_log_dump(void *handle, void *event_info, u8 event);
359 void dhd_schedule_log_dump(dhd_pub_t *dhdp);
360 static int do_dhd_log_dump(dhd_pub_t *dhdp);
361 #endif /* DHD_LOG_DUMP */
362
/* Reboot/shutdown hook; registered on the kernel reboot notifier chain. */
static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,	/* run before default-priority (0) notifiers */
};
368
369 #ifdef BCMPCIE
370 static int is_reboot = 0;
371 #endif /* BCMPCIE */
372
/* Queued interface create/delete event, handed to the deferred work queue
 * handlers (dhd_ifadd_event_handler / dhd_ifdel_event_handler).
 */
typedef struct dhd_if_event {
	struct list_head	list;	/* node in the pending-event list */
	wl_event_data_if_t	event;	/* interface event data from the dongle */
	char			name[IFNAMSIZ+1];	/* linux interface name (NUL-terminated) */
	uint8			mac[ETHER_ADDR_LEN];	/* MAC address for the interface */
} dhd_if_event_t;
379
/* Interface control information: one instance per virtual interface
 * (primary STA, AP, P2P, ...) backed by a linux net_device.
 */
typedef struct dhd_if {
	struct dhd_info *info;			/* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;			/* linux net device for this interface */
	int				idx;			/* iface idx in dongle */
	uint			subunit;		/* subunit */
	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
	bool			set_macaddress;	/* deferred MAC-address update pending */
	bool			set_multicast;	/* deferred multicast-list update pending */
	uint8			bssidx;			/* bsscfg index for the interface */
	bool			attached;		/* Delayed attachment when unset */
	bool			txflowcontrol;	/* Per interface flow control indicator */
	char			name[IFNAMSIZ+1]; /* linux interface name */
	char			dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
	struct net_device_stats stats;	/* counters reported to the network stack */
#ifdef DHD_WMF
	dhd_wmf_t		wmf;		/* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list;		/* sll of associated stations */
#if !defined(BCM_GMAC3)
	spinlock_t	sta_list_lock;		/* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
	uint32	ap_isolate;			/* ap-isolation settings */
#ifdef DHD_L2_FILTER
	bool parp_enable;	/* proxy-ARP enable for this BSS */
	bool parp_discard;
	bool parp_allnode;
	arp_table_t *phnd_arp_table;	/* ARP table handle used by the L2 filter */
/* for Per BSS modification */
	bool dhcp_unicast;	/* NOTE(review): presumably DHCP broadcast->unicast conversion — confirm in dhd_l2_filter */
	bool block_ping;
	bool grat_arp;
#endif /* DHD_L2_FILTER */
} dhd_if_t;
417
#ifdef WLMEDIA_HTSF
/* 64-bit TSF value split into 32-bit halves */
typedef struct {
	uint32 low;
	uint32 high;
} tsf_t;

/* Host <-> dongle timestamp synchronization state; the last_* fields hold
 * values captured at the most recent sync point.
 */
typedef struct {
	uint32 last_cycle;
	uint32 last_sec;
	uint32 last_tsf;
	uint32 coef;     /* scaling factor */
	uint32 coefdec1; /* first decimal  */
	uint32 coefdec2; /* second decimal */
} htsf_t;

/* Four-point timing record kept per timestamped packet */
typedef struct {
	uint32 t1;
	uint32 t2;
	uint32 t3;
	uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];	/* timing records, up to TSMAX entries */
static tstamp_t maxdelayts;	/* record with the worst delay observed */
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif  /* WLMEDIA_HTSF */
445
/* Payload for deferred IPv6 address-change work (see dhd_inet6_work_handler) */
struct ipv6_work_info_t {
	uint8			if_idx;		/* dongle interface index */
	char			ipv6_addr[16];	/* binary IPv6 address (128 bits) */
	unsigned long		event;		/* notifier event code that triggered this */
};
451
#ifdef DHD_DEBUG
/* Buffer descriptor passed to debug/memory-dump routines */
typedef struct dhd_dump {
	uint8 *buf;	/* dump data */
	int bufsize;	/* size of buf, in bytes */
} dhd_dump_t;
#endif /* DHD_DEBUG */
458
459 /* When Perimeter locks are deployed, any blocking calls must be preceeded
460  * with a PERIM UNLOCK and followed by a PERIM LOCK.
461  * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
462  * wait_event_timeout().
463  */
464
/* Local private structure (extension of pub): the full per-adapter,
 * Linux-specific driver state.  Allocated once per attached dongle.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;			/* OS-independent public state (see dhd.h) */
	dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */

	void *adapter;			/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t  htsf;
#endif
	/* Waiters for: ioctl response, PCIe D3 ack, bus-busy-state change */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	uint32	default_wd_interval;	/* default watchdog interval */

	struct timer_list timer;	/* watchdog timer; armed iff wd_timer_valid */
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	struct timer_list rpm_timer;	/* runtime-PM timer; armed iff rpm_timer_valid */
	bool rpm_timer_valid;
	tsk_ctl_t         thr_rpm_ctl;	/* runtime-PM thread control */
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;

	struct semaphore sdsem;
	tsk_ctl_t	thr_dpc_ctl;	/* DPC thread control */
	tsk_ctl_t	thr_wdt_ctl;	/* watchdog thread control */

	tsk_ctl_t	thr_rxf_ctl;	/* RX frame thread control */
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi;   /* Wifi wakelock */
	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
	struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#endif
	spinlock_t wakelock_spinlock;		/* protects the wakelock counters below */
	spinlock_t wakelock_evt_spinlock;	/* protects wakelock_event_counter */
	uint32 wakelock_event_counter;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* count of pending 802.1x frames */
	dhd_attach_states_t dhd_state;	/* how far attach progressed (for teardown) */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* IP address pending for the ARP offload host table */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void *rpc_th;
	void *rpc_osh;
	struct timer_list rpcth_timer;
	bool rpcth_timer_active;
	uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void			*dhd_deferred_wq;	/* deferred work queue (dhd_linux_wq) */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit;	/* instance number (cf. instance_base / dhd_found) */
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32	psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the currect CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t	tx_compl_cpu;


	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t	rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
	struct napi_struct    rx_napi_struct ____cacheline_aligned;
	atomic_t	rx_napi_cpu; /* cpu on which the napi is dispatched */
	struct net_device    *rx_napi_netdev; /* netdev of primary interface */

	struct work_struct    rx_napi_dispatcher_work;
	struct work_struct    tx_compl_dispatcher_work;
	struct work_struct    rx_compl_dispatcher_work;
	/* Number of times DPC Tasklet ran */
	uint32	dhd_dpc_cnt;

	/* Number of times NAPI processing got scheduled */
	uint32	napi_sched_cnt;

	/* Number of times NAPI processing ran on each available core */
	uint32	napi_percpu_run_cnt[NR_CPUS];

	/* Number of times RX Completions got scheduled */
	uint32	rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32	rxc_percpu_run_cnt[NR_CPUS];

	/* Number of times TX Completions got scheduled */
	uint32	txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32	txc_percpu_run_cnt[NR_CPUS];

	/* CPU status */
	/* Number of times each CPU came online */
	uint32	cpu_online_cnt[NR_CPUS];

	/* Number of times each CPU went offline */
	uint32	cpu_offline_cnt[NR_CPUS];

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * approriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
	uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
	uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

	struct kobject dhd_kobj;	/* sysfs object (see dhd_sysfs_init/exit) */
#ifdef SUPPORT_SENSORHUB
	uint32 shub_enable;
#endif /* SUPPORT_SENSORHUB */

	struct delayed_work dhd_memdump_work;	/* deferred memory-dump work */
} dhd_info_t;
690
691 #define DHDIF_FWDER(dhdif)      FALSE
692
693 /* Flag to indicate if we should download firmware on driver load */
694 uint dhd_download_fw_on_driverload = TRUE;
695
696 /* Flag to indicate if driver is initialized */
697 uint dhd_driver_init_done = FALSE;
698
699 /* Definitions to provide path to the firmware and nvram
700  * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
701  */
702 char firmware_path[MOD_PARAM_PATHLEN];
703 char nvram_path[MOD_PARAM_PATHLEN];
704 char config_path[MOD_PARAM_PATHLEN];
705
706 /* backup buffer for firmware and nvram path */
707 char fw_bak_path[MOD_PARAM_PATHLEN];
708 char nv_bak_path[MOD_PARAM_PATHLEN];
709
710 /* information string to keep firmware, chio, cheip version info visiable from log */
711 char info_string[MOD_PARAM_INFOLEN];
712 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
713 int op_mode = 0;
714 int disable_proptx = 0;
715 module_param(op_mode, int, 0644);
716
717 #if defined(DHD_LB_RXP)
718 static int dhd_napi_weight = 32;
719 module_param(dhd_napi_weight, int, 0644);
720 #endif /* DHD_LB_RXP */
721
722 extern int wl_control_wl_start(struct net_device *dev);
723 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
724 struct semaphore dhd_registration_sem;
725 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
726
727 /* deferred handlers */
728 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
729 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
730 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
731 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
732 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
733 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
734 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
735 #ifdef WL_CFG80211
736 extern void dhd_netdev_free(struct net_device *ndev);
737 #endif /* WL_CFG80211 */
738
739 /* Error bits */
740 module_param(dhd_msg_level, int, 0);
741 #if defined(WL_WIRELESS_EXT)
742 module_param(iw_msg_level, int, 0);
743 #endif
744 #ifdef WL_CFG80211
745 module_param(wl_dbg_level, int, 0);
746 #endif
747 module_param(android_msg_level, int, 0);
748 module_param(config_msg_level, int, 0);
749
750 #ifdef ARP_OFFLOAD_SUPPORT
751 /* ARP offload enable */
752 uint dhd_arp_enable = TRUE;
753 module_param(dhd_arp_enable, uint, 0);
754
755 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
756
757 #ifdef ENABLE_ARP_SNOOP_MODE
758 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
759 #else
760 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
761 #endif  /* ENABLE_ARP_SNOOP_MODE */
762
763 module_param(dhd_arp_mode, uint, 0);
764 #endif /* ARP_OFFLOAD_SUPPORT */
765
766 /* Disable Prop tx */
767 module_param(disable_proptx, int, 0644);
768 /* load firmware and/or nvram values from the filesystem */
769 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
770 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
771 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
772
773 /* Watchdog interval */
774
775 /* extend watchdog expiration to 2 seconds when DPC is running */
776 #define WATCHDOG_EXTEND_INTERVAL (2000)
777
778 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
779 module_param(dhd_watchdog_ms, uint, 0);
780
781 #ifdef DHD_PCIE_RUNTIMEPM
782 uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
783 #endif /* DHD_PCIE_RUNTIMEPMT */
784 #if defined(DHD_DEBUG)
785 /* Console poll interval */
786 uint dhd_console_ms = 0;
787 module_param(dhd_console_ms, uint, 0644);
788 #endif /* defined(DHD_DEBUG) */
789
790
791 uint dhd_slpauto = TRUE;
792 module_param(dhd_slpauto, uint, 0);
793
794 #ifdef PKT_FILTER_SUPPORT
795 /* Global Pkt filter enable control */
796 uint dhd_pkt_filter_enable = TRUE;
797 module_param(dhd_pkt_filter_enable, uint, 0);
798 #endif
799
800 /* Pkt filter init setup */
801 uint dhd_pkt_filter_init = 0;
802 module_param(dhd_pkt_filter_init, uint, 0);
803
804 /* Pkt filter mode control */
805 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
806 uint dhd_master_mode = FALSE;
807 #else
808 uint dhd_master_mode = FALSE;
809 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
810 module_param(dhd_master_mode, uint, 0);
811
812 int dhd_watchdog_prio = 0;
813 module_param(dhd_watchdog_prio, int, 0);
814
815 /* DPC thread priority */
816 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
817 module_param(dhd_dpc_prio, int, 0);
818
819 /* RX frame thread priority */
820 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
821 module_param(dhd_rxf_prio, int, 0);
822
823 int passive_channel_skip = 0;
824 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
825
826 #if !defined(BCMDHDUSB)
827 extern int dhd_dongle_ramsize;
828 module_param(dhd_dongle_ramsize, int, 0);
829 #endif /* BCMDHDUSB */
830
831 /* Keep track of number of instances */
832 static int dhd_found = 0;
833 static int instance_base = 0; /* Starting instance number */
834 module_param(instance_base, int, 0644);
835
836 /* Functions to manage sysfs interface for dhd */
837 static int dhd_sysfs_init(dhd_info_t *dhd);
838 static void dhd_sysfs_exit(dhd_info_t *dhd);
839
840 #if defined(DHD_LB)
841
842 static void
843 dhd_lb_set_default_cpus(dhd_info_t *dhd)
844 {
845         /* Default CPU allocation for the jobs */
846         atomic_set(&dhd->rx_napi_cpu, 1);
847         atomic_set(&dhd->rx_compl_cpu, 2);
848         atomic_set(&dhd->tx_compl_cpu, 2);
849 }
850
851 static void
852 dhd_cpumasks_deinit(dhd_info_t *dhd)
853 {
854         free_cpumask_var(dhd->cpumask_curr_avail);
855         free_cpumask_var(dhd->cpumask_primary);
856         free_cpumask_var(dhd->cpumask_primary_new);
857         free_cpumask_var(dhd->cpumask_secondary);
858         free_cpumask_var(dhd->cpumask_secondary_new);
859 }
860
/* Allocate the five cpumasks used by the load balancer and seed them:
 * curr_avail mirrors cpu_online_mask, while the primary/secondary preference
 * masks are populated from the DHD_LB_PRIMARY_CPUS / DHD_LB_SECONDARY_CPUS
 * bitmaps. Returns 0 on success or -ENOMEM after freeing the masks again.
 */
static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
        int id;
        uint32 cpus;
        int ret = 0;

        /* NOTE(review): on failure, dhd_cpumasks_deinit() frees masks that may
         * never have been allocated -- safe only if dhd was zero-initialized
         * (free_cpumask_var(NULL) is a no-op). Confirm the allocation path.
         */
        if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
                DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
                ret = -ENOMEM;
                goto fail;
        }

        /* Start from the CPUs online right now; the hotplug callback keeps
         * this mask up to date afterwards.
         */
        cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
        cpumask_clear(dhd->cpumask_primary);
        cpumask_clear(dhd->cpumask_secondary);

        /* NOTE(review): 'cpus' is a 32-bit bitmap but the loops scan NR_CPUS
         * bits; isset() would read past 'cpus' when NR_CPUS > 32 -- confirm.
         */
        cpus = DHD_LB_PRIMARY_CPUS;
        for (id = 0; id < NR_CPUS; id++) {
                if (isset(&cpus, id))
                        cpumask_set_cpu(id, dhd->cpumask_primary);
        }

        cpus = DHD_LB_SECONDARY_CPUS;
        for (id = 0; id < NR_CPUS; id++) {
                if (isset(&cpus, id))
                        cpumask_set_cpu(id, dhd->cpumask_secondary);
        }

        return ret;
fail:
        dhd_cpumasks_deinit(dhd);
        return ret;
}
899
/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups
 *  Primary Set - A CPU mask that carries the First Choice CPUs
 *  Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of Job that need to be assigned to
 * the CPUs, from one of the above mentioned CPU groups. The Jobs are
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, RX) - compl_cpu
 *
 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
 * algorithm tries to pick up the first available non-boot CPU (CPU0) for
 * napi_cpu. If there are more processors free, it assigns one to compl_cpu.
 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
 * CPU, as much as possible.
 *
 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
 * would allow Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts could have been serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
/* Re-evaluate which CPUs should run the NAPI and completion jobs, based on
 * the primary/secondary preference masks intersected with the currently
 * available (online) CPUs. Results are published atomically into
 * dhd->rx_napi_cpu, dhd->tx_compl_cpu and dhd->rx_compl_cpu; a value of 0
 * means "fall back to CPU0".
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
        uint32 primary_available_cpus; /* count of primary available cpus */
        uint32 secondary_available_cpus; /* count of secondary available cpus */
        uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
        uint32 compl_cpu = 0; /* cpu selected for completion jobs */

        cpumask_clear(dhd->cpumask_primary_new);
        cpumask_clear(dhd->cpumask_secondary_new);

        /*
         * Now select from the primary mask. Even if a Job is
         * already running on a CPU in secondary group, we still move
         * to primary CPU. So no conditional checks.
         */
        cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
                dhd->cpumask_curr_avail);

        cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
                dhd->cpumask_curr_avail);

        primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

        if (primary_available_cpus > 0) {
                napi_cpu = cpumask_first(dhd->cpumask_primary_new);

                /* If no further CPU is available,
                 * cpumask_next returns >= nr_cpu_ids
                 */
                compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
                if (compl_cpu >= nr_cpu_ids)
                        compl_cpu = 0;
        }

        DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
                __FUNCTION__, napi_cpu, compl_cpu));

        /* -- Now check for the CPUs from the secondary mask -- */
        secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

        DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
                __FUNCTION__, secondary_available_cpus, nr_cpu_ids));

        if (secondary_available_cpus > 0) {
                /* At this point if napi_cpu is unassigned it means no CPU
                 * is online from Primary Group
                 */
                /* NOTE(review): napi_cpu == 0 is also what cpumask_first()
                 * returns when CPU0 itself is in the primary mask, which this
                 * test would misread as "unassigned" -- confirm the primary
                 * mask never includes CPU0.
                 */
                if (napi_cpu == 0) {
                        napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
                        compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
                } else if (compl_cpu == 0) {
                        compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
                }

                /* If no CPU was available for completion, choose CPU 0 */
                if (compl_cpu >= nr_cpu_ids)
                        compl_cpu = 0;
        }
        if ((primary_available_cpus == 0) &&
                (secondary_available_cpus == 0)) {
                /* No CPUs available from primary or secondary mask */
                napi_cpu = 0;
                compl_cpu = 0;
        }

        DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
                __FUNCTION__, napi_cpu, compl_cpu));
        ASSERT(napi_cpu < nr_cpu_ids);
        ASSERT(compl_cpu < nr_cpu_ids);

        /* Both completion jobs intentionally share one CPU (see header comment). */
        atomic_set(&dhd->rx_napi_cpu, napi_cpu);
        atomic_set(&dhd->tx_compl_cpu, compl_cpu);
        atomic_set(&dhd->rx_compl_cpu, compl_cpu);
        return;
}
999
1000 /*
1001  * Function to handle CPU Hotplug notifications.
1002  * One of the task it does is to trigger the CPU Candidacy algorithm
1003  * for load balancing.
1004  */
1005 int
1006 dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1007 {
1008         unsigned int cpu = (unsigned int)(long)hcpu;
1009
1010         dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1011
1012         switch (action)
1013         {
1014                 case CPU_ONLINE:
1015                         DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1016                         cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1017                         dhd_select_cpu_candidacy(dhd);
1018                         break;
1019
1020                 case CPU_DOWN_PREPARE:
1021                 case CPU_DOWN_PREPARE_FROZEN:
1022                         DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1023                         cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1024                         dhd_select_cpu_candidacy(dhd);
1025                         break;
1026                 default:
1027                         break;
1028         }
1029
1030         return NOTIFY_OK;
1031 }
1032
1033 #if defined(DHD_LB_STATS)
1034 void dhd_lb_stats_init(dhd_pub_t *dhdp)
1035 {
1036         dhd_info_t *dhd;
1037         int i, j;
1038
1039         if (dhdp == NULL) {
1040                 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1041                         __FUNCTION__));
1042                 return;
1043         }
1044
1045         dhd = dhdp->info;
1046         if (dhd == NULL) {
1047                 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1048                 return;
1049         }
1050
1051         DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1052         DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1053         DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1054         DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1055
1056         for (i = 0; i < NR_CPUS; i++) {
1057                 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1058                 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1059                 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1060
1061                 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1062                 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1063         }
1064
1065         for (i = 0; i < NR_CPUS; i++) {
1066                 for (j = 0; j < HIST_BIN_SIZE; j++) {
1067                         DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1068                         DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1069                         DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1070                 }
1071         }
1072
1073         return;
1074 }
1075
1076 static void dhd_lb_stats_dump_histo(
1077         struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1078 {
1079         int i, j;
1080         uint32 per_cpu_total[NR_CPUS] = {0};
1081         uint32 total = 0;
1082
1083         bcm_bprintf(strbuf, "CPU: \t\t");
1084         for (i = 0; i < num_possible_cpus(); i++)
1085                 bcm_bprintf(strbuf, "%d\t", i);
1086         bcm_bprintf(strbuf, "\nBin\n");
1087
1088         for (i = 0; i < HIST_BIN_SIZE; i++) {
1089                 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1090                 for (j = 0; j < num_possible_cpus(); j++) {
1091                         bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1092                 }
1093                 bcm_bprintf(strbuf, "\n");
1094         }
1095         bcm_bprintf(strbuf, "Per CPU Total \t");
1096         total = 0;
1097         for (i = 0; i < num_possible_cpus(); i++) {
1098                 for (j = 0; j < HIST_BIN_SIZE; j++) {
1099                         per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1100                 }
1101                 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1102                 total += per_cpu_total[i];
1103         }
1104         bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1105
1106         return;
1107 }
1108
1109 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1110 {
1111         int i;
1112
1113         bcm_bprintf(strbuf, "CPU: \t");
1114         for (i = 0; i < num_possible_cpus(); i++)
1115                 bcm_bprintf(strbuf, "%d\t", i);
1116         bcm_bprintf(strbuf, "\n");
1117
1118         bcm_bprintf(strbuf, "Val: \t");
1119         for (i = 0; i < num_possible_cpus(); i++)
1120                 bcm_bprintf(strbuf, "%u\t", *(p+i));
1121         bcm_bprintf(strbuf, "\n");
1122         return;
1123 }
1124
/* Dump all load-balancer statistics into 'strbuf': CPU hotplug counts,
 * scheduler counts, and -- per compile option -- the per-CPU run counts and
 * histograms for NAPI rx, rx completions and tx completions.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
        dhd_info_t *dhd;

        if (dhdp == NULL || strbuf == NULL) {
                DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
                        __FUNCTION__, dhdp, strbuf));
                return;
        }

        dhd = dhdp->info;
        if (dhd == NULL) {
                DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
                return;
        }

        bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

        bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

        bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
                dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
                dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
        bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
        bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
        bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}
1172
1173 static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1174 {
1175         uint32 bin_power;
1176         uint32 *p = NULL;
1177
1178         bin_power = next_larger_power2(count);
1179
1180         switch (bin_power) {
1181                 case   0: break;
1182                 case   1: /* Fall through intentionally */
1183                 case   2: p = bin + 0; break;
1184                 case   4: p = bin + 1; break;
1185                 case   8: p = bin + 2; break;
1186                 case  16: p = bin + 3; break;
1187                 case  32: p = bin + 4; break;
1188                 case  64: p = bin + 5; break;
1189                 case 128: p = bin + 6; break;
1190                 default : p = bin + 7; break;
1191         }
1192         if (p)
1193                 *p = *p + 1;
1194         return;
1195 }
1196
1197 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1198 {
1199         int cpu;
1200         dhd_info_t *dhd = dhdp->info;
1201
1202         cpu = get_cpu();
1203         put_cpu();
1204         dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1205
1206         return;
1207 }
1208
1209 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1210 {
1211         int cpu;
1212         dhd_info_t *dhd = dhdp->info;
1213
1214         cpu = get_cpu();
1215         put_cpu();
1216         dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1217
1218         return;
1219 }
1220
1221 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1222 {
1223         int cpu;
1224         dhd_info_t *dhd = dhdp->info;
1225
1226         cpu = get_cpu();
1227         put_cpu();
1228         dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1229
1230         return;
1231 }
1232
1233 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
1234 {
1235         dhd_info_t *dhd = dhdp->info;
1236         DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
1237 }
1238
1239 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
1240 {
1241         dhd_info_t *dhd = dhdp->info;
1242         DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
1243 }
1244
1245 #endif /* DHD_LB_STATS */
1246 #endif /* DHD_LB */
1247
1248
#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
/* Frameburst state used by WFA certification configuration. */
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding;
 * compiled out (no-ops) in this build.
 */
#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
/* GMAC3 builds: the per-interface sta list lock macros compile away. */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
/* Non-GMAC3: the per-interface sta list is guarded by an irq-safe spinlock. */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
        spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
        spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
/* WMF unicast forwarding works on a snapshot copy of the sta list here. */
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
        struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern int dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
1303
/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds -- defined by the SDIO bus layer, tunable as module params */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

/* Version strings, defined elsewhere in the driver */
extern char dhd_version[];
extern char fw_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef WLMEDIA_HTSF
/* HTSF timestamp/latency helpers (presumably hardware TSF based -- the
 * implementations are not visible in this chunk).
 */
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);


#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
/* Checksum-offload (TOE) get/set helpers */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
                             wl_event_msg_t *event_ptr, void **data_ptr);
1399                              wl_event_msg_t *event_ptr, void **data_ptr);
1400
1401 #if defined(CONFIG_PM_SLEEP)
/* PM notifier: tracks system suspend/hibernate transitions. On suspend it
 * suspends wlfc (proprietary tx-status flow control) and on resume restores
 * it; on 2.6.27..2.6.39 kernels it also latches the global dhd_mmc_suspend
 * flag (presumably consumed by the SDIO/MMC glue -- not visible here).
 * Always returns NOTIFY_DONE.
 */
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
        int ret = NOTIFY_DONE;
        bool suspend = FALSE;
        dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

        BCM_REFERENCE(dhdinfo);

        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                suspend = TRUE;
                break;

        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                suspend = FALSE;
                break;
        }

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
        if (suspend) {
                /* Waive the wake lock around wlfc suspend so it cannot
                 * hold the system awake during the transition.
                 */
                DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
                dhd_wlfc_suspend(&dhdinfo->pub);
                DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
        } else
                dhd_wlfc_resume(&dhdinfo->pub);
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
        KERNEL_VERSION(2, 6, 39))
        dhd_mmc_suspend = suspend;
        smp_mb();
#endif

        return ret;
}
1441
/* Guard so the same PM notifier is never registered twice; otherwise a loop
 * is likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
        dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
        dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
        int          ifidx; /* interface index */
} dhd_dev_priv_t;

/* Accessors for the netdev private area (via the DEV_PRIV() macro). */
#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1468
1469 /** Clear the dhd net_device's private structure. */
1470 static inline void
1471 dhd_dev_priv_clear(struct net_device * dev)
1472 {
1473         dhd_dev_priv_t * dev_priv;
1474         ASSERT(dev != (struct net_device *)NULL);
1475         dev_priv = DHD_DEV_PRIV(dev);
1476         dev_priv->dhd = (dhd_info_t *)NULL;
1477         dev_priv->ifp = (dhd_if_t *)NULL;
1478         dev_priv->ifidx = DHD_BAD_IF;
1479 }
1480
1481 /** Setup the dhd net_device's private structure. */
1482 static inline void
1483 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1484                   int ifidx)
1485 {
1486         dhd_dev_priv_t * dev_priv;
1487         ASSERT(dev != (struct net_device *)NULL);
1488         dev_priv = DHD_DEV_PRIV(dev);
1489         dev_priv->dhd = dhd;
1490         dev_priv->ifp = ifp;
1491         dev_priv->ifidx = ifidx;
1492 }
1493
#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains from reducing branch conditionals, instruction parallelism,
 * dual issue, reducing load shadows, avail of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object: bus permanently down, never "up". */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
        .fwdh = FWDER_NULL,
#endif
        .pub = {
                 .info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
                 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
                 .up = FALSE,
                 .busstate = DHD_BUS_DOWN
        }
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL  (&dhd_info_null.pub)

/* Dummy netdevice object: permanently unregistered. */
struct net_device dhd_net_dev_null = {
        .reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object: points at the dummy dhd_info/netdev, bad index. */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
        .fwdh = FWDER_NULL,
#endif
#ifdef WMF
        .wmf = { .wmf_enable = TRUE },
#endif
        .info = DHD_INFO_NULL,
        .net = DHD_NET_DEV_NULL,
        .idx = DHD_BAD_IF
};
#define DHD_IF_NULL  (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Fetch the dhd_if object, given the interface index in the dhd. */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void     dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1561
1562 /* Return interface pointer */
1563 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1564 {
1565         ASSERT(ifidx < DHD_MAX_IFS);
1566
1567         if (ifidx >= DHD_MAX_IFS)
1568                 return NULL;
1569
1570         return dhdp->info->iflist[ifidx];
1571 }
1572
/** Reset a dhd_sta object and free into the dhd pool.
 * Drains and frees any packets still queued on the sta's per-priority flow
 * queues, returns the sta id to the id16 allocator, and resets the entry to
 * its free state (idx == ID16_INVALID, ifp pointing at the dummy dhd_if).
 */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
        int prio;

        ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

        ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

        /*
         * Flush and free all packets in all flowring's queues belonging to sta.
         * Packets in flow ring will be flushed later.
         */
        for (prio = 0; prio < (int)NUMPRIO; prio++) {
                uint16 flowid = sta->flowid[prio];

                if (flowid != FLOWID_INVALID) {
                        unsigned long flags;
                        flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
                        flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
                        /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
                         * when there is a newly coming packet from network stack.
                         */
                        dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

                        flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
                        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
                        /* Mark the ring "sta freeing" while holding the lock;
                         * presumably gates concurrent enqueues -- confirm at
                         * the enqueue side.
                         */
                        flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

                        if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
                                void * pkt;
                                while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
                                        PKTFREE(dhdp->osh, pkt, TRUE);
                                }
                        }

                        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
                        ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
                }

                sta->flowid[prio] = FLOWID_INVALID;
        }

        /* Return the id and scrub the entry back to its pool-free state. */
        id16_map_free(dhdp->staid_allocator, sta->idx);
        DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
        sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
        sta->ifidx = DHD_BAD_IF;
        bzero(sta->ea.octet, ETHER_ADDR_LEN);
        INIT_LIST_HEAD(&sta->list);
        sta->idx = ID16_INVALID; /* implying free */
}
1628
1629 /** Allocate a dhd_sta object from the dhd pool. */
1630 static dhd_sta_t *
1631 dhd_sta_alloc(dhd_pub_t * dhdp)
1632 {
1633         uint16 idx;
1634         dhd_sta_t * sta;
1635         dhd_sta_pool_t * sta_pool;
1636
1637         ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1638
1639         idx = id16_map_alloc(dhdp->staid_allocator);
1640         if (idx == ID16_INVALID) {
1641                 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1642                 return DHD_STA_NULL;
1643         }
1644
1645         sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1646         sta = &sta_pool[idx];
1647
1648         ASSERT((sta->idx == ID16_INVALID) &&
1649                (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1650
1651         DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1652
1653         sta->idx = idx; /* implying allocated */
1654
1655         return sta;
1656 }
1657
/** Delete all STAs in an interface's STA list.
 * Walks ifp->sta_list under the list lock, unlinks each entry and returns it
 * to the sta pool via dhd_sta_free(). On GMAC3 builds each entry is first
 * deassociated from the WOFA forwarder.
 */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
        dhd_sta_t *sta, *next;
        unsigned long flags;

        DHD_IF_STA_LIST_LOCK(ifp, flags);

        list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
                if (ifp->fwdh) {
                        /* Remove sta from WOFA forwarder. */
                        fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
                }
#endif /* BCM_GMAC3 */
                list_del(&sta->list);
                dhd_sta_free(&ifp->info->pub, sta);
        }

        DHD_IF_STA_LIST_UNLOCK(ifp, flags);

        return;
}
1682
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database.
 * Compiles to a no-op on non-GMAC3 builds; also does nothing when the
 * interface is NULL or has no forwarder handle. Note the entries are only
 * flushed from WOFA -- they remain on ifp->sta_list.
 */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)

        if (ifp && (ifp->fwdh != FWDER_NULL)) {
                dhd_sta_t *sta, *next;
                unsigned long flags;

                DHD_IF_STA_LIST_LOCK(ifp, flags);

                list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
                        /* Remove any sta entry from WOFA forwarder. */
                        fwder_flush(ifp->fwdh, (wofa_t)sta);
                }

                DHD_IF_STA_LIST_UNLOCK(ifp, flags);
        }
#endif /* BCM_GMAC3 */
}
1704
/** Construct a pool of dhd_sta_t objects to be used by interfaces.
 * Creates an id16 allocator (id #0 reserved) and an array of max_sta + 1
 * entries, then pushes every entry through dhd_sta_free() so the pool starts
 * fully free. Returns BCME_OK, or BCME_ERROR on allocation failure (with the
 * id allocator cleaned up again).
 */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
        int idx, prio, sta_pool_memsz;
        dhd_sta_t * sta;
        dhd_sta_pool_t * sta_pool;
        void * staid_allocator;

        ASSERT(dhdp != (dhd_pub_t *)NULL);
        ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

        /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
        staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
        if (staid_allocator == NULL) {
                DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
                return BCME_ERROR;
        }

        /* Pre allocate a pool of dhd_sta objects (one extra). */
        sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
        sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
        if (sta_pool == NULL) {
                DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
                id16_map_fini(dhdp->osh, staid_allocator);
                return BCME_ERROR;
        }

        dhdp->sta_pool = sta_pool;
        dhdp->staid_allocator = staid_allocator;

        /* Initialize all sta(s) for the pre-allocated free pool. */
        bzero((uchar *)sta_pool, sta_pool_memsz);
        /* Hand every entry a permanent id; the descending loop presumably
         * pairs with the id16 allocator's hand-out order so that
         * sta_pool[i].idx == i -- NOTE(review): confirm against id16_map_alloc.
         */
        for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
                sta = &sta_pool[idx];
                sta->idx = id16_map_alloc(staid_allocator);
                ASSERT(sta->idx <= max_sta);
        }
        /* Now place them into the pre-allocated free pool. */
        for (idx = 1; idx <= max_sta; idx++) {
                sta = &sta_pool[idx];
                for (prio = 0; prio < (int)NUMPRIO; prio++) {
                        sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
                }
                dhd_sta_free(dhdp, sta);
        }

        return BCME_OK;
}
1754
1755 /** Destruct the pool of dhd_sta_t objects.
1756  * Caller must ensure that no STA objects are currently associated with an if.
1757  */
1758 static void
1759 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1760 {
1761         dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1762
1763         if (sta_pool) {
1764                 int idx;
1765                 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1766                 for (idx = 1; idx <= max_sta; idx++) {
1767                         ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1768                         ASSERT(sta_pool[idx].idx == ID16_INVALID);
1769                 }
1770                 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1771                 dhdp->sta_pool = NULL;
1772         }
1773
1774         id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1775         dhdp->staid_allocator = NULL;
1776 }
1777
1778 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1779 static void
1780 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1781 {
1782         int idx, prio, sta_pool_memsz;
1783         dhd_sta_t * sta;
1784         dhd_sta_pool_t * sta_pool;
1785         void *staid_allocator;
1786
1787         if (!dhdp) {
1788                 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1789                 return;
1790         }
1791
1792         sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1793         staid_allocator = dhdp->staid_allocator;
1794
1795         if (!sta_pool) {
1796                 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1797                 return;
1798         }
1799
1800         if (!staid_allocator) {
1801                 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1802                 return;
1803         }
1804
1805         /* clear free pool */
1806         sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1807         bzero((uchar *)sta_pool, sta_pool_memsz);
1808
1809         /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1810         id16_map_clear(staid_allocator, max_sta, 1);
1811
1812         /* Initialize all sta(s) for the pre-allocated free pool. */
1813         for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1814                 sta = &sta_pool[idx];
1815                 sta->idx = id16_map_alloc(staid_allocator);
1816                 ASSERT(sta->idx <= max_sta);
1817         }
1818         /* Now place them into the pre-allocated free pool. */
1819         for (idx = 1; idx <= max_sta; idx++) {
1820                 sta = &sta_pool[idx];
1821                 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1822                         sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1823                 }
1824                 dhd_sta_free(dhdp, sta);
1825         }
1826 }
1827
1828 /** Find STA with MAC address ea in an interface's STA list. */
1829 dhd_sta_t *
1830 dhd_find_sta(void *pub, int ifidx, void *ea)
1831 {
1832         dhd_sta_t *sta;
1833         dhd_if_t *ifp;
1834         unsigned long flags;
1835
1836         ASSERT(ea != NULL);
1837         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1838         if (ifp == NULL)
1839                 return DHD_STA_NULL;
1840
1841         DHD_IF_STA_LIST_LOCK(ifp, flags);
1842
1843         list_for_each_entry(sta, &ifp->sta_list, list) {
1844                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1845                         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1846                         return sta;
1847                 }
1848         }
1849
1850         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1851
1852         return DHD_STA_NULL;
1853 }
1854
1855 /** Add STA into the interface's STA list. */
1856 dhd_sta_t *
1857 dhd_add_sta(void *pub, int ifidx, void *ea)
1858 {
1859         dhd_sta_t *sta;
1860         dhd_if_t *ifp;
1861         unsigned long flags;
1862
1863         ASSERT(ea != NULL);
1864         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1865         if (ifp == NULL)
1866                 return DHD_STA_NULL;
1867
1868         sta = dhd_sta_alloc((dhd_pub_t *)pub);
1869         if (sta == DHD_STA_NULL) {
1870                 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1871                 return DHD_STA_NULL;
1872         }
1873
1874         memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1875
1876         /* link the sta and the dhd interface */
1877         sta->ifp = ifp;
1878         sta->ifidx = ifidx;
1879         INIT_LIST_HEAD(&sta->list);
1880
1881         DHD_IF_STA_LIST_LOCK(ifp, flags);
1882
1883         list_add_tail(&sta->list, &ifp->sta_list);
1884
1885 #if defined(BCM_GMAC3)
1886         if (ifp->fwdh) {
1887                 ASSERT(ISALIGNED(ea, 2));
1888                 /* Add sta to WOFA forwarder. */
1889                 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1890         }
1891 #endif /* BCM_GMAC3 */
1892
1893         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1894
1895         return sta;
1896 }
1897
1898 /** Delete STA from the interface's STA list. */
1899 void
1900 dhd_del_sta(void *pub, int ifidx, void *ea)
1901 {
1902         dhd_sta_t *sta, *next;
1903         dhd_if_t *ifp;
1904         unsigned long flags;
1905
1906         ASSERT(ea != NULL);
1907         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1908         if (ifp == NULL)
1909                 return;
1910
1911         DHD_IF_STA_LIST_LOCK(ifp, flags);
1912
1913         list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1914                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1915 #if defined(BCM_GMAC3)
1916                         if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1917                                 ASSERT(ISALIGNED(ea, 2));
1918                                 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1919                         }
1920 #endif /* BCM_GMAC3 */
1921                         list_del(&sta->list);
1922                         dhd_sta_free(&ifp->info->pub, sta);
1923                 }
1924         }
1925
1926         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1927 #ifdef DHD_L2_FILTER
1928         if (ifp->parp_enable) {
1929                 /* clear Proxy ARP cache of specific Ethernet Address */
1930                 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1931                         ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1932         }
1933 #endif /* DHD_L2_FILTER */
1934         return;
1935 }
1936
1937 /** Add STA if it doesn't exist. Not reentrant. */
1938 dhd_sta_t*
1939 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1940 {
1941         dhd_sta_t *sta;
1942
1943         sta = dhd_find_sta(pub, ifidx, ea);
1944
1945         if (!sta) {
1946                 /* Add entry */
1947                 sta = dhd_add_sta(pub, ifidx, ea);
1948         }
1949
1950         return sta;
1951 }
1952
1953 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1954 #if !defined(BCM_GMAC3)
1955 static struct list_head *
1956 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1957 {
1958         unsigned long flags;
1959         dhd_sta_t *sta, *snapshot;
1960
1961         INIT_LIST_HEAD(snapshot_list);
1962
1963         DHD_IF_STA_LIST_LOCK(ifp, flags);
1964
1965         list_for_each_entry(sta, &ifp->sta_list, list) {
1966                 /* allocate one and add to snapshot */
1967                 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1968                 if (snapshot == NULL) {
1969                         DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1970                         continue;
1971                 }
1972
1973                 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1974
1975                 INIT_LIST_HEAD(&snapshot->list);
1976                 list_add_tail(&snapshot->list, snapshot_list);
1977         }
1978
1979         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1980
1981         return snapshot_list;
1982 }
1983
1984 static void
1985 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1986 {
1987         dhd_sta_t *sta, *next;
1988
1989         list_for_each_entry_safe(sta, next, snapshot_list, list) {
1990                 list_del(&sta->list);
1991                 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1992         }
1993 }
1994 #endif /* !BCM_GMAC3 */
1995 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1996
1997 #else
/* PCIE_FULL_DONGLE not defined: per-interface STA tracking is not needed,
 * so the pool/list helpers collapse to no-ops and lookups report "no STA".
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2005 #endif /* PCIE_FULL_DONGLE */
2006
2007
2008 #if defined(DHD_LB)
2009
2010 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2011 /**
2012  * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2013  * CPU and schedules a tasklet.
2014  * @tasklet: opaque pointer to the tasklet
2015  */
2016 static INLINE void
2017 dhd_tasklet_schedule(void *tasklet)
2018 {
2019         tasklet_schedule((struct tasklet_struct *)tasklet);
2020 }
2021
2022 /**
2023  * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2024  * @tasklet: tasklet to be scheduled
2025  * @on_cpu: cpu core id
2026  *
2027  * If the requested cpu is online, then an IPI is sent to this cpu via the
2028  * smp_call_function_single with no wait and the tasklet_schedule function
2029  * will be invoked to schedule the specified tasklet on the requested CPU.
2030  */
2031 static void
2032 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2033 {
2034         const int wait = 0;
2035         smp_call_function_single(on_cpu,
2036                 dhd_tasklet_schedule, (void *)tasklet, wait);
2037 }
2038 #endif /* DHD_LB_TXC || DHD_LB_RXC */
2039
2040
2041 #if defined(DHD_LB_TXC)
2042 /**
2043  * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2044  * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2045  * freeing the packets placed in the tx_compl workq
2046  */
2047 void
2048 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2049 {
2050         dhd_info_t *dhd = dhdp->info;
2051         int curr_cpu, on_cpu;
2052
2053         if (dhd->rx_napi_netdev == NULL) {
2054                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2055                 return;
2056         }
2057
2058         DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2059         /*
2060          * If the destination CPU is NOT online or is same as current CPU
2061          * no need to schedule the work
2062          */
2063         curr_cpu = get_cpu();
2064         put_cpu();
2065
2066         on_cpu = atomic_read(&dhd->tx_compl_cpu);
2067
2068         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2069                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2070         } else {
2071                 schedule_work(&dhd->tx_compl_dispatcher_work);
2072         }
2073 }
2074
2075 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2076 {
2077         struct dhd_info *dhd =
2078                 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2079         int cpu;
2080
2081         get_online_cpus();
2082         cpu = atomic_read(&dhd->tx_compl_cpu);
2083         if (!cpu_online(cpu))
2084                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2085         else
2086                 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2087         put_online_cpus();
2088 }
2089
2090 #endif /* DHD_LB_TXC */
2091
2092
2093 #if defined(DHD_LB_RXC)
2094 /**
2095  * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2096  * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2097  * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2098  * placed in the rx_compl workq.
2099  *
2100  * @dhdp: pointer to dhd_pub object
2101  */
2102 void
2103 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2104 {
2105         dhd_info_t *dhd = dhdp->info;
2106         int curr_cpu, on_cpu;
2107
2108         if (dhd->rx_napi_netdev == NULL) {
2109                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2110                 return;
2111         }
2112
2113         DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2114         /*
2115          * If the destination CPU is NOT online or is same as current CPU
2116          * no need to schedule the work
2117          */
2118         curr_cpu = get_cpu();
2119         put_cpu();
2120
2121         on_cpu = atomic_read(&dhd->rx_compl_cpu);
2122
2123         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2124                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2125         } else {
2126                 schedule_work(&dhd->rx_compl_dispatcher_work);
2127         }
2128 }
2129
2130 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2131 {
2132         struct dhd_info *dhd =
2133                 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2134         int cpu;
2135
2136         get_online_cpus();
2137         cpu = atomic_read(&dhd->tx_compl_cpu);
2138         if (!cpu_online(cpu))
2139                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2140         else
2141                 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2142         put_online_cpus();
2143 }
2144
2145 #endif /* DHD_LB_RXC */
2146
2147
2148 #if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;
	const int chan = 0;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));

	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* rx_process_queue is now private to this invocation: drain it
	 * without holding any lock.
	 */
	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		/* interface index was stashed in the pkttag by the producer */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	napi_complete(napi);

	/* NOTE(review): unconditionally reports budget-1 rather than
	 * 'processed'; the NAPI contract expects the number of packets
	 * handled (strictly less than budget once napi_complete has been
	 * called) -- confirm this is intentional before changing.
	 */
	return budget - 1;
}
2203
2204 /**
2205  * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2206  * poll list. This function may be invoked via the smp_call_function_single
2207  * from a remote CPU.
2208  *
2209  * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2210  * after the napi_struct is added to the softnet data's poll_list
2211  *
2212  * @info: pointer to a dhd_info struct
2213  */
2214 static void
2215 dhd_napi_schedule(void *info)
2216 {
2217         dhd_info_t *dhd = (dhd_info_t *)info;
2218
2219         DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2220                 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2221
2222         /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2223         if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2224                 __napi_schedule(&dhd->rx_napi_struct);
2225                 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2226         }
2227
2228         /*
2229          * If the rx_napi_struct was already running, then we let it complete
2230          * processing all its packets. The rx_napi_struct may only run on one
2231          * core at a time, to avoid out-of-order handling.
2232          */
2233 }
2234
2235 /**
2236  * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2237  * action after placing the dhd's rx_process napi object in the the remote CPU's
2238  * softnet data's poll_list.
2239  *
2240  * @dhd: dhd_info which has the rx_process napi object
2241  * @on_cpu: desired remote CPU id
2242  */
2243 static INLINE int
2244 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2245 {
2246         int wait = 0; /* asynchronous IPI */
2247
2248         DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2249                 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2250
2251         if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2252                 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2253                         __FUNCTION__, on_cpu));
2254         }
2255
2256         DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2257
2258         return 0;
2259 }
2260
2261 /*
2262  * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2263  * Why should we do this?
2264  * The candidacy algorithm is run from the call back function
2265  * registered to CPU hotplug notifier. This call back happens from Worker
2266  * context. The dhd_napi_schedule_on is also from worker context.
2267  * Note that both of this can run on two different CPUs at the same time.
2268  * So we can possibly have a window where a given CPUn is being brought
2269  * down from CPUm while we try to run a function on CPUn.
2270  * To prevent this its better have the whole code to execute an SMP
2271  * function under get_online_cpus.
2272  * This function call ensures that hotplug mechanism does not kick-in
2273  * until we are done dealing with online CPUs
2274  * If the hotplug worker is already running, no worries because the
2275  * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2276  *
2277  * The below mentioned code structure is proposed in
2278  * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2279  * for the question
2280  * Q: I need to ensure that a particular cpu is not removed when there is some
2281  *    work specific to this cpu is in progress
2282  *
2283  * According to the documentation calling get_online_cpus is NOT required, if
2284  * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2285  * run from Work Queue context we have to call these functions
2286  */
2287 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2288 {
2289         struct dhd_info *dhd =
2290                 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2291         int cpu;
2292
2293         get_online_cpus();
2294         cpu = atomic_read(&dhd->rx_napi_cpu);
2295         if (!cpu_online(cpu))
2296                 dhd_napi_schedule(dhd);
2297         else
2298                 dhd_napi_schedule_on(dhd, cpu);
2299         put_online_cpus();
2300 }
2301
2302 /**
2303  * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2304  * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2305  * the packets enqueued into the rx_napi_queue and sendup.
2306  * The producer's rx packet queue is appended to the rx_napi_queue before
2307  * dispatching the rx_napi_struct.
2308  */
2309 void
2310 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2311 {
2312         unsigned long flags;
2313         dhd_info_t *dhd = dhdp->info;
2314         int curr_cpu;
2315         int on_cpu;
2316
2317         if (dhd->rx_napi_netdev == NULL) {
2318                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2319                 return;
2320         }
2321
2322         DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2323                 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2324
2325         /* append the producer's queue of packets to the napi's rx process queue */
2326         spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2327         skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2328         spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2329
2330         /*
2331          * If the destination CPU is NOT online or is same as current CPU
2332          * no need to schedule the work
2333          */
2334         curr_cpu = get_cpu();
2335         put_cpu();
2336
2337         on_cpu = atomic_read(&dhd->rx_napi_cpu);
2338
2339         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2340                 dhd_napi_schedule(dhd);
2341         } else {
2342                 schedule_work(&dhd->rx_napi_dispatcher_work);
2343         }
2344 }
2345
2346 /**
2347  * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2348  */
2349 void
2350 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2351 {
2352         dhd_info_t *dhd = dhdp->info;
2353
2354         DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2355                 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2356         DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2357         __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2358 }
2359 #endif /* DHD_LB_RXP */
2360
2361 #endif /* DHD_LB */
2362
/* Deferred-work handler for collecting a dongle memory dump.
 * BCM_REFERENCE() keeps 'dhd' "used" when BCMPCIE is not defined and the
 * body below compiles out.
 */
static void dhd_memdump_work_handler(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, dhd_memdump_work.work);

	BCM_REFERENCE(dhd);
#ifdef BCMPCIE
	/* PCIe full-dongle: pull the memory dump over the bus */
	dhd_prot_collect_memdump(&dhd->pub);
#endif
}
2373
2374
2375 /** Returns dhd iflist index corresponding the the bssidx provided by apps */
2376 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2377 {
2378         dhd_if_t *ifp;
2379         dhd_info_t *dhd = dhdp->info;
2380         int i;
2381
2382         ASSERT(bssidx < DHD_MAX_IFS);
2383         ASSERT(dhdp);
2384
2385         for (i = 0; i < DHD_MAX_IFS; i++) {
2386                 ifp = dhd->iflist[i];
2387                 if (ifp && (ifp->bssidx == bssidx)) {
2388                         DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2389                                 ifp->name, bssidx, i));
2390                         break;
2391                 }
2392         }
2393         return i;
2394 }
2395
/* Enqueue an rx skb onto the fixed-size, lock-protected ring (skbbuf[])
 * consumed by the rx-frame thread.  Returns BCME_OK on success; when the
 * slot at store_idx is still occupied (consumer has fallen behind) returns
 * BCME_BUSY (RXF_DEQUEUE_ON_BUSY builds) or BCME_ERROR.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* publish the skb, then advance store_idx modulo MAXSKBPEND
	 * (the mask requires MAXSKBPEND to be a power of two)
	 */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
2436
2437 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2438 {
2439         uint32 store_idx;
2440         uint32 sent_idx;
2441         void *skb;
2442
2443         dhd_os_rxflock(dhdp);
2444
2445         store_idx = dhdp->store_idx;
2446         sent_idx = dhdp->sent_idx;
2447         skb = dhdp->skbbuf[sent_idx];
2448
2449         if (skb == NULL) {
2450                 dhd_os_rxfunlock(dhdp);
2451                 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2452                         store_idx, sent_idx));
2453                 return NULL;
2454         }
2455
2456         dhdp->skbbuf[sent_idx] = NULL;
2457         dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2458
2459         DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2460                 skb, sent_idx));
2461
2462         dhd_os_rxfunlock(dhdp);
2463
2464         return skb;
2465 }
2466
2467 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2468 {
2469         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2470
2471         if (prepost) { /* pre process */
2472                 dhd_read_macaddr(dhd);
2473         } else { /* post process */
2474                 dhd_write_macaddr(&dhd->pub.mac);
2475         }
2476
2477         return 0;
2478 }
2479
2480 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2481 static bool
2482 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2483 {
2484         bool _apply = FALSE;
2485         /* In case of IBSS mode, apply arp pkt filter */
2486         if (op_mode & DHD_FLAG_IBSS_MODE) {
2487                 _apply = TRUE;
2488                 goto exit;
2489         }
2490         /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2491         if ((dhd->arp_version == 1) &&
2492                 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2493                 _apply = TRUE;
2494                 goto exit;
2495         }
2496
2497 exit:
2498         return _apply;
2499 }
2500 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2501
2502 void dhd_set_packet_filter(dhd_pub_t *dhd)
2503 {
2504 #ifdef PKT_FILTER_SUPPORT
2505         int i;
2506
2507         DHD_TRACE(("%s: enter\n", __FUNCTION__));
2508         if (dhd_pkt_filter_enable) {
2509                 for (i = 0; i < dhd->pktfilter_count; i++) {
2510                         dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2511                 }
2512         }
2513 #endif /* PKT_FILTER_SUPPORT */
2514 }
2515
2516 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2517 {
2518 #ifdef PKT_FILTER_SUPPORT
2519         int i;
2520
2521         DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2522
2523         if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2524                 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2525                 return;
2526         }
2527         /* 1 - Enable packet filter, only allow unicast packet to send up */
2528         /* 0 - Disable packet filter */
2529         if (dhd_pkt_filter_enable && (!value ||
2530             (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2531         {
2532                 for (i = 0; i < dhd->pktfilter_count; i++) {
2533 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2534                         if (value && (i == DHD_ARP_FILTER_NUM) &&
2535                                 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2536                                 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2537                                         "val %d, cnt %d, op_mode 0x%x\n",
2538                                         value, i, dhd->op_mode));
2539                                 continue;
2540                         }
2541 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2542                         dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2543                                 value, dhd_master_mode);
2544                 }
2545         }
2546 #endif /* PKT_FILTER_SUPPORT */
2547 }
2548
/*
 * dhd_set_suspend() - apply or remove the driver's extra power-save
 * configuration when the kernel suspends/resumes.
 *
 * @value: non-zero on suspend entry, zero on resume.
 * @dhd:   DHD public context.
 *
 * Suspend path (value && dhd->in_suspend): raise the PM mode, enable the
 * packet filters, drop allmulti, increase DTIM skip, and (under
 * DHD_USE_EARLYSUSPEND) tune beacon timeout, roam thresholds, bcn_li_bcn
 * and the IPv6 RA filter.  Resume path restores each knob to its
 * active-mode value.  Individual iovar failures are logged but do not
 * abort the sequence.
 *
 * Return: 0 on completion, -ENODEV when @dhd is NULL.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
#ifdef SUPPORT_SENSORHUB
	uint32 shub_msreq;
#endif /* SUPPORT_SENSORHUB */
	/* wl_pkt_filter_enable_t	enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0;	/* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	int bcn_li_bcn;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
	int ret = 0;
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
	uint32 allmulti;
	uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

	if (!dhd)
		return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
#ifndef SUPPORT_PM2_ONLY
	/* A non-negative module-configured PM mode overrides the PM_MAX default. */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
#endif /* SUPPORT_PM2_ONLY */
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 1;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
					DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */

#ifndef SUPPORT_PM2_ONLY
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#endif /* PKT_FILTER_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
			/* Turn allmulti off on every live interface for suspend. */
			allmulti = 0;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings.  Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_ditm on WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
			/* NOTE: when WLTDLS is defined this statement is the
			 * dangling "else" branch of the tdls_mode check above.
			 */
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));
			if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
				TRUE, 0) < 0)
					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 0;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* enable IPv6 RA filter in  firmware during suspend */
				nd_ra_filter = 1;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#endif /* DHD_USE_EARLYSUSPEND */
		} else {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed  */
			DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 0;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
					4, iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
						DHD_ERROR(("%s Sensor Hub move/stop stop:"
							"failed %d\n", __FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */


#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			/* Restore allmulti on every live interface. */
			allmulti = 1;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* restore pre-suspend setting for dtim_skip
			 * (bcn_li_dtim is still 0 here, its resume default)
			 */
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));

			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 1;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* disable IPv6 RA filter in  firmware during suspend */
				nd_ra_filter = 0;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#endif /* DHD_USE_EARLYSUSPEND */
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
2810
2811 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2812 {
2813         dhd_pub_t *dhdp = &dhd->pub;
2814         int ret = 0;
2815
2816         DHD_OS_WAKE_LOCK(dhdp);
2817         DHD_PERIM_LOCK(dhdp);
2818
2819         /* Set flag when early suspend was called */
2820         dhdp->in_suspend = val;
2821         if ((force || !dhdp->suspend_disable_flag) &&
2822                 dhd_support_sta_mode(dhdp))
2823         {
2824                 ret = dhd_set_suspend(val, dhdp);
2825         }
2826
2827         DHD_PERIM_UNLOCK(dhdp);
2828         DHD_OS_WAKE_UNLOCK(dhdp);
2829         return ret;
2830 }
2831
2832 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2833 static void dhd_early_suspend(struct early_suspend *h)
2834 {
2835         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2836         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2837
2838         if (dhd)
2839                 dhd_suspend_resume_helper(dhd, 1, 0);
2840 }
2841
2842 static void dhd_late_resume(struct early_suspend *h)
2843 {
2844         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2845         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2846
2847         if (dhd)
2848                 dhd_suspend_resume_helper(dhd, 0, 0);
2849 }
2850 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2851
2852 /*
2853  * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
2854  * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
2855  *
2856  *      dhd_timeout_start(&tmo, usec);
2857  *      while (!dhd_timeout_expired(&tmo))
2858  *              if (poll_something())
2859  *                      break;
2860  *      if (dhd_timeout_expired(&tmo))
2861  *              fatal();
2862  */
2863
2864 void
2865 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2866 {
2867         tmo->limit = usec;
2868         tmo->increment = 0;
2869         tmo->elapsed = 0;
2870         tmo->tick = jiffies_to_usecs(1);
2871 }
2872
/*
 * dhd_timeout_expired() - advance a timeout armed by dhd_timeout_start().
 *
 * Returns 1 once the accumulated delay reaches the limit, else delays
 * (spin with exponential back-off until the interval reaches one jiffy,
 * then a one-jiffy scheduled sleep) and returns 0.  See the usage
 * example in the comment above dhd_timeout_start().
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	/* Busy-wait when sleeping is not allowed, or while the interval is
	 * still below one tick; double the interval each pass, capped at
	 * one tick so the next pass switches to the scheduled sleep below.
	 */
	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* Sleep for one jiffy on a private (never-woken) wait queue;
		 * schedule_timeout() provides the actual delay.
		 */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
2906
2907 int
2908 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2909 {
2910         int i = 0;
2911
2912         if (!dhd) {
2913                 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2914                 return DHD_BAD_IF;
2915         }
2916
2917         while (i < DHD_MAX_IFS) {
2918                 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2919                         return i;
2920                 i++;
2921         }
2922
2923         return DHD_BAD_IF;
2924 }
2925
2926 struct net_device * dhd_idx2net(void *pub, int ifidx)
2927 {
2928         struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2929         struct dhd_info *dhd_info;
2930
2931         if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2932                 return NULL;
2933         dhd_info = dhd_pub->info;
2934         if (dhd_info && dhd_info->iflist[ifidx])
2935                 return dhd_info->iflist[ifidx]->net;
2936         return NULL;
2937 }
2938
2939 int
2940 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2941 {
2942         int i = DHD_MAX_IFS;
2943
2944         ASSERT(dhd);
2945
2946         if (name == NULL || *name == '\0')
2947                 return 0;
2948
2949         while (--i > 0)
2950                 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2951                                 break;
2952
2953         DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2954
2955         return i;       /* default - the primary interface */
2956 }
2957
2958 char *
2959 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2960 {
2961         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2962
2963         ASSERT(dhd);
2964
2965         if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2966                 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2967                 return "<if_bad>";
2968         }
2969
2970         if (dhd->iflist[ifidx] == NULL) {
2971                 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2972                 return "<if_null>";
2973         }
2974
2975         if (dhd->iflist[ifidx]->net)
2976                 return dhd->iflist[ifidx]->net->name;
2977
2978         return "<if_none>";
2979 }
2980
2981 uint8 *
2982 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2983 {
2984         int i;
2985         dhd_info_t *dhd = (dhd_info_t *)dhdp;
2986
2987         ASSERT(dhd);
2988         for (i = 0; i < DHD_MAX_IFS; i++)
2989         if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2990                 return dhd->iflist[i]->mac_addr;
2991
2992         return NULL;
2993 }
2994
2995
/*
 * _dhd_set_multicast_list() - push the kernel's multicast state for
 * interface @ifidx down to the dongle.
 *
 * Three firmware settings are programmed in sequence:
 *   1. "mcast_list" iovar carrying the net_device's multicast addresses;
 *   2. "allmulti" iovar, from IFF_ALLMULTI (forced TRUE when the
 *      mcast_list set fails, or under PASS_ALL_MCAST_PKTS while not
 *      early-suspended);
 *   3. WLC_SET_PROMISC, from IFF_PROMISC.
 * Failures are logged and the function presses on; there is no return
 * value.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	/* Snapshot the multicast count under the address lock. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */


	/* Buffer layout: "mcast_list\0" + 32-bit LE count + cnt MACs. */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

	/* Re-take the lock for the walk.  The list may have grown since
	 * the count snapshot above, so the walk copies at most cnt
	 * addresses (the "!cnt break" / "cnt > 0" guards below).
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#else /* LINUX < 2.6.35 */
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
			cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* Dongle rejected the list: fall back to allmulti if any
		 * addresses were pending.
		 */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
			   dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			   dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			   dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
3148
3149 int
3150 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3151 {
3152         char buf[32];
3153         wl_ioctl_t ioc;
3154         int ret;
3155
3156         if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3157                 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3158                 return -1;
3159         }
3160         memset(&ioc, 0, sizeof(ioc));
3161         ioc.cmd = WLC_SET_VAR;
3162         ioc.buf = buf;
3163         ioc.len = 32;
3164         ioc.set = TRUE;
3165
3166         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3167         if (ret < 0) {
3168                 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3169         } else {
3170                 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3171                 if (ifidx == 0)
3172                         memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3173         }
3174
3175         return ret;
3176 }
3177
3178 #ifdef SOFTAP
3179 extern struct net_device *ap_net_dev;
3180 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
3181 #endif
3182
3183 #ifdef DHD_PSTA
3184 /* Get psta/psr configuration configuration */
3185 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3186 {
3187         dhd_info_t *dhd = dhdp->info;
3188         return (int)dhd->psta_mode;
3189 }
3190 /* Set psta/psr configuration configuration */
3191 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3192 {
3193         dhd_info_t *dhd = dhdp->info;
3194         dhd->psta_mode = val;
3195         return 0;
3196 }
3197 #endif /* DHD_PSTA */
3198
/*
 * dhd_ifadd_event_handler() - deferred-work handler that creates and
 * registers a new virtual interface in response to a firmware IF_ADD
 * event.  @event_info is a dhd_if_event_t allocated by the event path
 * and freed here in all cases (the "done" label).
 *
 * Under WL_CFG80211 on >= 3.11 kernels a wireless_dev is also allocated
 * and attached to the new net_device before registration; under
 * PCIE_FULL_DONGLE, AP-role interfaces additionally get firmware
 * ap_isolate enabled.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct wireless_dev *vwdev, *primary_wdev;
	struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

	/* This path is for non-android case */
	/* The interface name in host and in event msg are same */
	/* if name in event msg is used to create dongle if list on host */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
		goto done;
	}

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	/* Attach a wireless_dev sharing the primary interface's wiphy.
	 * NOTE(review): on kzalloc failure we bail to "done" without
	 * undoing dhd_allocate_if() — looks like the allocated interface
	 * slot is leaked on this path; confirm against dhd_allocate_if().
	 */
	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
	if (unlikely(!vwdev)) {
		DHD_ERROR(("Could not allocate wireless device\n"));
		goto done;
	}
	primary_ndev = dhd->pub.info->iflist[0]->net;
	primary_wdev = ndev_to_wdev(primary_ndev);
	vwdev->wiphy = primary_wdev->wiphy;
	vwdev->iftype = if_event->event.role;
	vwdev->netdev = ndev;
	ndev->ieee80211_ptr = vwdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* Registration re-enters the network stack, so drop the perimeter
	 * lock around it.
	 */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifdef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		char iovbuf[WLC_IOCTL_SMLEN];
		uint32 var_int =  1;

		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	/* The event payload is owned by this handler — free it on every path. */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
3293
3294 static void
3295 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3296 {
3297         dhd_info_t *dhd = handle;
3298         int ifidx;
3299         dhd_if_event_t *if_event = event_info;
3300
3301
3302         if (event != DHD_WQ_WORK_IF_DEL) {
3303                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3304                 return;
3305         }
3306
3307         if (!dhd) {
3308                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3309                 return;
3310         }
3311
3312         if (!if_event) {
3313                 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3314                 return;
3315         }
3316
3317         dhd_net_if_lock_local(dhd);
3318         DHD_OS_WAKE_LOCK(&dhd->pub);
3319         DHD_PERIM_LOCK(&dhd->pub);
3320
3321         ifidx = if_event->event.ifidx;
3322         DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3323
3324         DHD_PERIM_UNLOCK(&dhd->pub);
3325         dhd_remove_if(&dhd->pub, ifidx, TRUE);
3326         DHD_PERIM_LOCK(&dhd->pub);
3327
3328         MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3329
3330         DHD_PERIM_UNLOCK(&dhd->pub);
3331         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3332         dhd_net_if_unlock_local(dhd);
3333 }
3334
3335 static void
3336 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3337 {
3338         dhd_info_t *dhd = handle;
3339         dhd_if_t *ifp = event_info;
3340
3341         if (event != DHD_WQ_WORK_SET_MAC) {
3342                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3343         }
3344
3345         if (!dhd) {
3346                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3347                 return;
3348         }
3349
3350         dhd_net_if_lock_local(dhd);
3351         DHD_OS_WAKE_LOCK(&dhd->pub);
3352         DHD_PERIM_LOCK(&dhd->pub);
3353
3354 #ifdef SOFTAP
3355         {
3356                 unsigned long flags;
3357                 bool in_ap = FALSE;
3358                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3359                 in_ap = (ap_net_dev != NULL);
3360                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3361
3362                 if (in_ap)  {
3363                         DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3364                                    ifp->net->name));
3365                         goto done;
3366                 }
3367         }
3368 #endif /* SOFTAP */
3369
3370         if (ifp == NULL || !dhd->pub.up) {
3371                 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3372                 goto done;
3373         }
3374
3375         DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3376         ifp->set_macaddress = FALSE;
3377         if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3378                 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3379         else
3380                 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3381
3382 done:
3383         DHD_PERIM_UNLOCK(&dhd->pub);
3384         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3385         dhd_net_if_unlock_local(dhd);
3386 }
3387
3388 static void
3389 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3390 {
3391         dhd_info_t *dhd = handle;
3392         dhd_if_t *ifp = event_info;
3393         int ifidx;
3394
3395         if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3396                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3397                 return;
3398         }
3399
3400         if (!dhd) {
3401                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3402                 return;
3403         }
3404
3405         dhd_net_if_lock_local(dhd);
3406         DHD_OS_WAKE_LOCK(&dhd->pub);
3407         DHD_PERIM_LOCK(&dhd->pub);
3408
3409 #ifdef SOFTAP
3410         {
3411                 bool in_ap = FALSE;
3412                 unsigned long flags;
3413                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3414                 in_ap = (ap_net_dev != NULL);
3415                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3416
3417                 if (in_ap)  {
3418                         DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3419                                    ifp->net->name));
3420                         ifp->set_multicast = FALSE;
3421                         goto done;
3422                 }
3423         }
3424 #endif /* SOFTAP */
3425
3426         if (ifp == NULL || !dhd->pub.up) {
3427                 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3428                 goto done;
3429         }
3430
3431         ifidx = ifp->idx;
3432
3433
3434         _dhd_set_multicast_list(dhd, ifidx);
3435         DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3436
3437 done:
3438         DHD_PERIM_UNLOCK(&dhd->pub);
3439         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3440         dhd_net_if_unlock_local(dhd);
3441 }
3442
3443 static int
3444 dhd_set_mac_address(struct net_device *dev, void *addr)
3445 {
3446         int ret = 0;
3447
3448         dhd_info_t *dhd = DHD_DEV_INFO(dev);
3449         struct sockaddr *sa = (struct sockaddr *)addr;
3450         int ifidx;
3451         dhd_if_t *dhdif;
3452
3453         ifidx = dhd_net2idx(dhd, dev);
3454         if (ifidx == DHD_BAD_IF)
3455                 return -1;
3456
3457         dhdif = dhd->iflist[ifidx];
3458
3459         dhd_net_if_lock_local(dhd);
3460         memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3461         dhdif->set_macaddress = TRUE;
3462         dhd_net_if_unlock_local(dhd);
3463         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3464                 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3465         return ret;
3466 }
3467
3468 static void
3469 dhd_set_multicast_list(struct net_device *dev)
3470 {
3471         dhd_info_t *dhd = DHD_DEV_INFO(dev);
3472         int ifidx;
3473
3474         ifidx = dhd_net2idx(dhd, dev);
3475         if (ifidx == DHD_BAD_IF)
3476                 return;
3477
3478         dhd->iflist[ifidx]->set_multicast = TRUE;
3479         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3480                 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3481 }
3482
3483 #ifdef PROP_TXSTATUS
3484 int
3485 dhd_os_wlfc_block(dhd_pub_t *pub)
3486 {
3487         dhd_info_t *di = (dhd_info_t *)(pub->info);
3488         ASSERT(di != NULL);
3489         spin_lock_bh(&di->wlfc_spinlock);
3490         return 1;
3491 }
3492
3493 int
3494 dhd_os_wlfc_unblock(dhd_pub_t *pub)
3495 {
3496         dhd_info_t *di = (dhd_info_t *)(pub->info);
3497
3498         ASSERT(di != NULL);
3499         spin_unlock_bh(&di->wlfc_spinlock);
3500         return 1;
3501 }
3502
3503 #endif /* PROP_TXSTATUS */
3504
3505 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Ethernet-type to name mapping used by the TX/RX packet dump loggers. */
typedef struct {
	uint16 type;		/* Ethernet type (host byte order) */
	const char *str;	/* human-readable name for dump output */
} PKTTYPE_INFO;

/* Lookup table for _get_packet_type_str(); the { 0, "" } entry is a
 * sentinel returned for unknown types.
 */
static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, ""}
};
3520
3521 static const char *_get_packet_type_str(uint16 type)
3522 {
3523         int i;
3524         int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
3525
3526         for (i = 0; i < n; i++) {
3527                 if (packet_type_info[i].type == type)
3528                         return packet_type_info[i].str;
3529         }
3530
3531         return packet_type_info[n].str;
3532 }
3533 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3534
3535 #if defined(DHD_TX_DUMP)
3536 void
3537 dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
3538 {
3539         uint8 *dump_data;
3540         uint16 protocol;
3541         char *ifname;
3542
3543         dump_data = PKTDATA(osh, pkt);
3544         protocol = (dump_data[12] << 8) | dump_data[13];
3545         ifname = ndev ? ndev->name : "N/A";
3546
3547         DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
3548
3549         if (protocol == ETHER_TYPE_802_1X) {
3550                 dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
3551         }
3552
3553 #if defined(DHD_TX_FULL_DUMP)
3554         {
3555                 int i;
3556                 uint datalen;
3557                 datalen = PKTLEN(osh, pkt);
3558
3559                 for (i = 0; i < datalen; i++) {
3560                         printk("%02X ", dump_data[i]);
3561                         if ((i & 15) == 15)
3562                                 printk("\n");
3563                 }
3564                 printk("\n");
3565         }
3566 #endif /* DHD_TX_FULL_DUMP */
3567 }
3568 #endif /* DHD_TX_DUMP */
3569
/*  This routine does not support the packet chain feature; currently tested
 *  for the proxy ARP feature.  Hands a single packet 'p' up to the Linux
 *  network stack on interface 'ifidx'.  Always returns BCME_OK.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;
#if defined(BCM_GMAC3)
	/* Forwarder capable interfaces use WOFA based forwarding */
	if (ifp->fwdh) {
		struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
		uint16 * da = (uint16 *)(eh->ether_dhost);
		wofa_t wofa;
		ASSERT(ISALIGNED(da, 2));

		/* Unknown destinations are flooded via the forwarder; anything
		 * else (or a failed transmit) is dropped here.
		 */
		wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
		if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
			if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
				return BCME_OK;
			}
		}
		PKTFRMNATIVE(dhdp->osh, p);
		PKTFREE(dhdp->osh, p, FALSE);
		return BCME_OK;
	}
#endif /* BCM_GMAC3 */

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			/* NOTE(review): with a single packet, skbhead is always
			 * NULL here, so the PKTSETNEXT chaining branch is dead;
			 * it mirrors the chain-handling pattern used elsewhere.
			 */
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
			netif_rx_ni(skb);
#else
			ulong flags;
			netif_rx(skb);
			local_irq_save(flags);
			RAISE_RX_SOFTIRQ();
			local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
		}
	}

	/* Hand the (single-entry) list to the RX thread when it is enabled. */
	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}
3644
/* Core TX path: validates bus state, runs optional L2 filtering, updates TX
 * statistics, assigns packet priority/flowring, and hands the packet to the
 * bus layer (directly or through proptxstatus).  On any error path the
 * packet is freed here; on success ownership passes to the bus.
 */
int BCMFASTPATH
__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;
#ifdef DHD_L2_FILTER
	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif
#ifdef DHD_8021X_DUMP
	/* NOTE(review): ndev is declared under DHD_8021X_DUMP but used below
	 * under DHD_TX_DUMP; defining DHD_TX_DUMP alone would not compile.
	 */
	struct net_device *ndev;
#endif /* DHD_8021X_DUMP */

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -EBUSY;
	}
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_L2_FILTER
	/* if dhcp_unicast is enabled, we need to convert the */
	/* broadcast DHCP ACK/REPLY packets to Unicast. */
	if (ifp->dhcp_unicast) {
	    uint8* mac_addr;
	    uint8* ehptr = NULL;
	    /* NOTE(review): this inner 'ret' shadows the function-level ret;
	     * harmless here since the outer value is unchanged by this block.
	     */
	    int ret;
	    ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
	    if (ret == BCME_OK) {
		/*  if given mac address having valid entry in sta list
		 *  copy the given mac address, and return with BCME_OK
		*/
		if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
		    ehptr = PKTDATA(dhdp->osh, pktbuf);
		    bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
		}
	    }
	}

	/* In AP role, gratuitous ARP frames are filtered out entirely. */
	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
	    if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
	    }
	}

	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

		/* Drop the packets if l2 filter has processed it already
		 * otherwise continue with the normal path
		 */
		if (ret == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}
#endif /* DHD_L2_FILTER */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		/* 802.1X frames are counted so teardown can wait for them. */
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
			atomic_inc(&dhd->pend_8021x_cnt);
#ifdef DHD_DHCP_DUMP
		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
			struct net_device *net;
			char *ifname;

			net = dhd_idx2net(dhdp, ifidx);
			ifname = net ? net->name : "N/A";
			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			/* NOTE(review): the reads below (up to udp_port_pos+250)
			 * are not bounded against PKTLEN(); a short or malformed
			 * IP frame could be read past its end here — confirm and
			 * add a length check.
			 */
			source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
			dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
			if (source_port == 0x0044 || dest_port == 0x0044) {
				/* DHCP message-type option assumed at a fixed
				 * offset (option 53, len 1) for logging only.
				 */
				dump_hex = (pktdata[udp_port_pos+249] << 8) |
					pktdata[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
				}
#ifdef DHD_LOSSLESS_ROAMING
				if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
					DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
				}
#endif /* DHD_LOSSLESS_ROAMING */
				DHD_ERROR(("\n"));
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
	} else {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
	}

	/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
	if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
	{
#ifdef QOS_MAP_SET
		pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
#else
		pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
	}


#ifdef PCIE_FULL_DONGLE
	/*
	 * Lkup the per interface hash table, for a matching flowring. If one is not
	 * available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */
	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
	if (ret != BCME_OK) {
		/* dhd->pub.osh is the same handle as dhdp->osh used elsewhere. */
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif

#if defined(DHD_TX_DUMP)
	ndev = dhd_idx2net(dhdp, ifidx);
	dhd_tx_dump(ndev, dhdp->osh, pktbuf);
#endif
	/* terence 20150901: Micky add to ajust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	{
		/* If the protocol uses a data header, apply it */
		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
	}

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef PROP_TXSTATUS
	{
		/* Try the proptxstatus commit path first; fall back to a
		 * direct bus transmit when wlfc reports it is unsupported.
		 */
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */

	return ret;
}
3848
/* Public TX entry wrapper around __dhd_sendpkt().  Rejects packets while the
 * bus is down, marks the bus busy-in-send for the duration of the call (so
 * suspend/teardown can wait on it), and optionally wakes the PCIe bus from
 * runtime suspend first.  Frees the packet itself on its own error paths.
 */
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = 0;
	unsigned long flags;

	/* Check bus state and set the busy flag atomically under the
	 * general lock so teardown cannot race past this send.
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	if (dhdp->busstate == DHD_BUS_DOWN ||
			dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_ERROR(("%s: returning as busstate=%d\n",
			__FUNCTION__, dhdp->busstate));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		ret = -EBUSY;
		goto exit;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);

#ifdef DHD_PCIE_RUNTIMEPM
exit:
#endif
	/* Clear the busy flag on every path (success, send error, or wake
	 * failure) so waiters are not blocked forever.
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return ret;
}
3886
3887 int BCMFASTPATH
3888 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3889 {
3890         int ret;
3891         uint datalen;
3892         void *pktbuf;
3893         dhd_info_t *dhd = DHD_DEV_INFO(net);
3894         dhd_if_t *ifp = NULL;
3895         int ifidx;
3896         unsigned long flags;
3897 #ifdef WLMEDIA_HTSF
3898         uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3899 #else
3900         uint8 htsfdlystat_sz = 0;
3901 #endif 
3902 #ifdef DHD_WMF
3903         struct ether_header *eh;
3904         uint8 *iph;
3905 #endif /* DHD_WMF */
3906
3907         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3908
3909
3910 #ifdef PCIE_FULL_DONGLE
3911         DHD_GENERAL_LOCK(&dhd->pub, flags);
3912         dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3913         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3914 #endif /* PCIE_FULL_DONGLE */
3915
3916 #ifdef DHD_PCIE_RUNTIMEPM
3917         if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3918                 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
3919                 /* stop the network queue temporarily until resume done */
3920                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3921                 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3922                         dhd_bus_stop_queue(dhd->pub.bus);
3923                 }
3924                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3925                 dhd_os_busbusy_wake(&dhd->pub);
3926                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3927 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3928                 return -ENODEV;
3929 #else
3930                 return NETDEV_TX_BUSY;
3931 #endif
3932         }
3933 #endif /* DHD_PCIE_RUNTIMEPM */
3934
3935         DHD_GENERAL_LOCK(&dhd->pub, flags);
3936 #ifdef PCIE_FULL_DONGLE
3937         if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3938                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3939                 dhd_os_busbusy_wake(&dhd->pub);
3940                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3941 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3942                 return -ENODEV;
3943 #else
3944                 return NETDEV_TX_BUSY;
3945 #endif
3946         }
3947 #endif /* PCIE_FULL_DONGLE */
3948
3949         DHD_OS_WAKE_LOCK(&dhd->pub);
3950         DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3951
3952         /* Reject if down */
3953         if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3954                 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3955                 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3956                         __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3957                 netif_stop_queue(net);
3958                 /* Send Event when bus down detected during data session */
3959                 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3960                         DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3961                         dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3962                         net_os_send_hang_message(net);
3963                 }
3964 #ifdef PCIE_FULL_DONGLE
3965                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3966                 dhd_os_busbusy_wake(&dhd->pub);
3967                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3968 #endif /* PCIE_FULL_DONGLE */
3969                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3970                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3971 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3972                 return -ENODEV;
3973 #else
3974                 return NETDEV_TX_BUSY;
3975 #endif
3976         }
3977
3978         ifp = DHD_DEV_IFP(net);
3979         ifidx = DHD_DEV_IFIDX(net);
3980         BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
3981
3982         if (ifidx == DHD_BAD_IF) {
3983                 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3984                 netif_stop_queue(net);
3985 #ifdef PCIE_FULL_DONGLE
3986                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3987                 dhd_os_busbusy_wake(&dhd->pub);
3988                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3989 #endif /* PCIE_FULL_DONGLE */
3990                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3991                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3992 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3993                 return -ENODEV;
3994 #else
3995                 return NETDEV_TX_BUSY;
3996 #endif
3997         }
3998         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3999
4000         ASSERT(ifidx == dhd_net2idx(dhd, net));
4001         ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4002
4003         bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4004
4005         /* re-align socket buffer if "skb->data" is odd address */
4006         if (((unsigned long)(skb->data)) & 0x1) {
4007                 unsigned char *data = skb->data;
4008                 uint32 length = skb->len;
4009                 PKTPUSH(dhd->pub.osh, skb, 1);
4010                 memmove(skb->data, data, length);
4011                 PKTSETLEN(dhd->pub.osh, skb, length);
4012         }
4013
4014         datalen  = PKTLEN(dhd->pub.osh, skb);
4015
4016         /* Make sure there's enough room for any header */
4017         if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4018                 struct sk_buff *skb2;
4019
4020                 DHD_INFO(("%s: insufficient headroom\n",
4021                           dhd_ifname(&dhd->pub, ifidx)));
4022                 dhd->pub.tx_realloc++;
4023
4024                 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4025                 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4026
4027                 dev_kfree_skb(skb);
4028                 if ((skb = skb2) == NULL) {
4029                         DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4030                                    dhd_ifname(&dhd->pub, ifidx)));
4031                         ret = -ENOMEM;
4032                         goto done;
4033                 }
4034                 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4035         }
4036
4037         /* Convert to packet */
4038         if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4039                 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4040                            dhd_ifname(&dhd->pub, ifidx)));
4041                 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4042                 dev_kfree_skb_any(skb);
4043                 ret = -ENOMEM;
4044                 goto done;
4045         }
4046
4047 #if defined(WLMEDIA_HTSF)
4048         if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4049                 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4050                 struct ether_header *eh = (struct ether_header *)pktdata;
4051
4052                 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4053                         (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4054                         eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4055                 }
4056         }
4057 #endif 
4058
4059 #ifdef DHD_WMF
4060         eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4061         iph = (uint8 *)eh + ETHER_HDR_LEN;
4062
4063         /* WMF processing for multicast packets
4064          * Only IPv4 packets are handled
4065          */
4066         if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4067                 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4068                 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4069 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4070                 void *sdu_clone;
4071                 bool ucast_convert = FALSE;
4072 #ifdef DHD_UCAST_UPNP
4073                 uint32 dest_ip;
4074
4075                 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4076                 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4077 #endif /* DHD_UCAST_UPNP */
4078 #ifdef DHD_IGMP_UCQUERY
4079                 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4080                         (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4081                         (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4082 #endif /* DHD_IGMP_UCQUERY */
4083                 if (ucast_convert) {
4084                         dhd_sta_t *sta;
4085 #ifdef PCIE_FULL_DONGLE
4086                         unsigned long flags;
4087 #endif
4088                         struct list_head snapshot_list;
4089                         struct list_head *wmf_ucforward_list;
4090
4091                         ret = NETDEV_TX_OK;
4092
4093                         /* For non BCM_GMAC3 platform we need a snapshot sta_list to
4094                          * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
4095                          */
4096                         wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4097
4098                         /* Convert upnp/igmp query to unicast for each assoc STA */
4099                         list_for_each_entry(sta, wmf_ucforward_list, list) {
4100                                 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4101                                         ret = WMF_NOP;
4102                                         break;
4103                                 }
4104                                 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4105                         }
4106                         DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4107
4108 #ifdef PCIE_FULL_DONGLE
4109                         DHD_GENERAL_LOCK(&dhd->pub, flags);
4110                         dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4111                         dhd_os_busbusy_wake(&dhd->pub);
4112                         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4113 #endif /* PCIE_FULL_DONGLE */
4114                         DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4115                         DHD_OS_WAKE_UNLOCK(&dhd->pub);
4116
4117                         if (ret == NETDEV_TX_OK)
4118                                 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4119
4120                         return ret;
4121                 } else
4122 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4123                 {
4124                         /* There will be no STA info if the packet is coming from LAN host
4125                          * Pass as NULL
4126                          */
4127                         ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4128                         switch (ret) {
4129                         case WMF_TAKEN:
4130                         case WMF_DROP:
4131                                 /* Either taken by WMF or we should drop it.
4132                                  * Exiting send path
4133                                  */
4134 #ifdef PCIE_FULL_DONGLE
4135                                 DHD_GENERAL_LOCK(&dhd->pub, flags);
4136                                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4137                                 dhd_os_busbusy_wake(&dhd->pub);
4138                                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4139 #endif /* PCIE_FULL_DONGLE */
4140                                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4141                                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4142                                 return NETDEV_TX_OK;
4143                         default:
4144                                 /* Continue the transmit path */
4145                                 break;
4146                         }
4147                 }
4148         }
4149 #endif /* DHD_WMF */
4150 #ifdef DHD_PSTA
4151         /* PSR related packet proto manipulation should be done in DHD
4152          * since dongle doesn't have complete payload
4153          */
4154         if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4155                 ifidx, &pktbuf, TRUE) < 0)) {
4156                         DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4157                                 dhd_ifname(&dhd->pub, ifidx)));
4158         }
4159 #endif /* DHD_PSTA */
4160
4161 #ifdef DHDTCPACK_SUPPRESS
4162         if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4163                 /* If this packet has been hold or got freed, just return */
4164                 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4165                         ret = 0;
4166                         goto done;
4167                 }
4168         } else {
4169                 /* If this packet has replaced another packet and got freed, just return */
4170                 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4171                         ret = 0;
4172                         goto done;
4173                 }
4174         }
4175 #endif /* DHDTCPACK_SUPPRESS */
4176
4177         /* no segmented SKB support (Kernel-3.18.y) */
4178         if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4179                 PKTSETLINK(skb, NULL);
4180         }
4181
4182         ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4183
4184 done:
4185         if (ret) {
4186                 ifp->stats.tx_dropped++;
4187                 dhd->pub.tx_dropped++;
4188         } else {
4189
4190 #ifdef PROP_TXSTATUS
4191                 /* tx_packets counter can counted only when wlfc is disabled */
4192                 if (!dhd_wlfc_is_supported(&dhd->pub))
4193 #endif
4194                 {
4195                         dhd->pub.tx_packets++;
4196                         ifp->stats.tx_packets++;
4197                         ifp->stats.tx_bytes += datalen;
4198                 }
4199         }
4200
4201 #ifdef PCIE_FULL_DONGLE
4202         DHD_GENERAL_LOCK(&dhd->pub, flags);
4203         dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4204         dhd_os_busbusy_wake(&dhd->pub);
4205         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4206 #endif /* PCIE_FULL_DONGLE */
4207
4208         DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4209         DHD_OS_WAKE_UNLOCK(&dhd->pub);
4210         BUZZZ_LOG(START_XMIT_END, 0);
4211
4212         /* Return ok: we always eat the packet */
4213 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4214         return 0;
4215 #else
4216         return NETDEV_TX_OK;
4217 #endif
4218 }
4219
4220
4221 void
4222 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4223 {
4224         struct net_device *net;
4225         dhd_info_t *dhd = dhdp->info;
4226         int i;
4227
4228         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4229
4230         ASSERT(dhd);
4231
4232 #ifdef DHD_LOSSLESS_ROAMING
4233         /* block flowcontrol during roaming */
4234         if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4235                 return;
4236         }
4237 #endif
4238
4239         if (ifidx == ALL_INTERFACES) {
4240                 /* Flow control on all active interfaces */
4241                 dhdp->txoff = state;
4242                 for (i = 0; i < DHD_MAX_IFS; i++) {
4243                         if (dhd->iflist[i]) {
4244                                 net = dhd->iflist[i]->net;
4245                                 if (state == ON)
4246                                         netif_stop_queue(net);
4247                                 else
4248                                         netif_wake_queue(net);
4249                         }
4250                 }
4251         } else {
4252                 if (dhd->iflist[ifidx]) {
4253                         net = dhd->iflist[ifidx]->net;
4254                         if (state == ON)
4255                                 netif_stop_queue(net);
4256                         else
4257                                 netif_wake_queue(net);
4258                 }
4259         }
4260 }
4261
4262
4263 #ifdef DHD_WMF
4264 bool
4265 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
4266 {
4267         dhd_info_t *dhd = dhdp->info;
4268
4269         return dhd->rxthread_enabled;
4270 }
4271 #endif /* DHD_WMF */
4272
/**
 * Called when a frame is received by the dongle on interface 'ifidx'.
 *
 * Walks the chain of up to 'numpkt' packets starting at 'pktbuf', applies
 * the optional filtering/forwarding stages (L2 filter, WMF multicast,
 * PSTA, PCIe intra-BSS forwarding), then either dispatches firmware event
 * packets (ETHER_TYPE_BRCM) to dhd_wl_host_event() or delivers data frames
 * to the network stack — directly, or batched to the rx thread when it is
 * enabled.  Wake-lock timeouts are extended on exit so the host stays
 * awake long enough to consume the delivered traffic.  'chan' is the rx
 * channel/flag supplied by the bus layer (unused in this function).
 */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	uchar *eth;
	uint len;
	void *data, *pnext = NULL;
	int i;
	dhd_if_t *ifp;
	wl_event_msg_t event;
	int tout_rx = 0;	/* rx wake-lock timeout to apply on exit (ms) */
	int tout_ctrl = 0;	/* ctrl/event wake-lock timeout to apply on exit (ms) */
	void *skbhead = NULL;	/* head of the batch queued for the rx thread */
	void *skbprev = NULL;	/* tail of that batch */
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
	char *dump_data;
	uint16 protocol;
	char *ifname;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Unlink each packet from the chain before processing so it can be
	 * freed or forwarded independently of its successors.
	 */
	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		struct ether_header *eh;

		pnext = PKTNEXT(dhdp->osh, pktbuf);
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);

		/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
		{
			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
			__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}


#ifdef PROP_TXSTATUS
		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send header only packet when
			there is an urgent message but no packet to
			piggy-back on
			*/
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}
#endif
#ifdef DHD_L2_FILTER
		/* If block_ping is enabled drop the ping packet */
		if (ifp->block_ping) {
			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* STA role: optionally discard gratuitous ARP frames */
		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
		    if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
		    }
		}
		/* AP role: proxy-ARP handling may consume the packet entirely */
		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);

			/* Drop the packets if l2 filter has processed it already
			 * otherwise continue with the normal path
			 */
			if (ret == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, TRUE);
				continue;
			}
		}
#endif /* DHD_L2_FILTER */
#ifdef DHD_WMF
		/* WMF processing for multicast packets */
		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
			dhd_sta_t *sta;
			int ret;

			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
			switch (ret) {
				case WMF_TAKEN:
					/* The packet is taken by WMF. Continue to next iteration */
					continue;
				case WMF_DROP:
					/* Packet DROP decision by WMF. Toss it */
					DHD_ERROR(("%s: WMF decides to drop packet\n",
						__FUNCTION__));
					PKTCFREE(dhdp->osh, pktbuf, FALSE);
					continue;
				default:
					/* Continue the transmit path */
					break;
			}
		}
#endif /* DHD_WMF */

#ifdef DHDTCPACK_SUPPRESS
		/* Record TCP stream info used by tx-side ACK suppression. */
		dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
		skb = PKTTONATIVE(dhdp->osh, pktbuf);

		ASSERT(ifp);
		skb->dev = ifp->net;

#ifdef DHD_PSTA
		if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
				DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
					dhd_ifname(dhdp, ifidx)));
		}
#endif /* DHD_PSTA */

#ifdef PCIE_FULL_DONGLE
		/* Intra-BSS forwarding for AP / P2P-GO when client isolation is
		 * off: unicast destined to a known associated STA is sent back
		 * to air (and not passed up the stack); multicast is duplicated
		 * so it is both re-transmitted and delivered locally.
		 */
		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
			(!ifp->ap_isolate)) {
			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
			if (ETHER_ISUCAST(eh->ether_dhost)) {
				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
					dhd_sendpkt(dhdp, ifidx, pktbuf);
					continue;
				}
			} else {
				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
				if (npktbuf)
					dhd_sendpkt(dhdp, ifidx, npktbuf);
			}
		}
#endif /* PCIE_FULL_DONGLE */

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
		dump_data = skb->data;
		/* Ethertype read straight from the still-unpulled MAC header */
		protocol = (dump_data[12] << 8) | dump_data[13];
		ifname = skb->dev ? skb->dev->name : "N/A";
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
#ifdef DHD_8021X_DUMP
		if (protocol == ETHER_TYPE_802_1X) {
			dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
		}
#endif /* DHD_8021X_DUMP */
#ifdef DHD_DHCP_DUMP
		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;	/* IHL, in bytes */

			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
			/* UDP port 68 (0x44): DHCP client side */
			if (source_port == 0x0044 || dest_port == 0x0044) {
				/* Message-type option is read at a fixed offset into
				 * the payload — assumes a standard option layout;
				 * TODO confirm against real captures.
				 */
				dump_hex = (dump_data[udp_port_pos+249] << 8) |
					dump_data[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
				}
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				/* UDP port 67 (0x43): DHCP server side */
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
		DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
		if (protocol != ETHER_TYPE_BRCM) {
			if (dump_data[0] == 0xFF) {
				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));

				if ((dump_data[12] == 8) &&
					(dump_data[13] == 6)) {
					DHD_ERROR(("%s: ARP %d\n",
						__FUNCTION__, dump_data[0x15]));
				}
			} else if (dump_data[0] & 1) {
				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(dump_data)));
			}
#ifdef DHD_RX_FULL_DUMP
			{
				int k;
				for (k = 0; k < skb->len; k++) {
					printk("%02X ", dump_data[k]);
					if ((k & 15) == 15)
						printk("\n");
				}
				printk("\n");
			}
#endif /* DHD_RX_FULL_DUMP */
		}
#endif /* DHD_RX_DUMP */

		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST) {
			dhd->pub.rx_multicast++;
			ifp->stats.multicast++;
		}

		/* Restore data/len: eth_type_trans() pulled the MAC header,
		 * but the event parser below needs the full frame.
		 */
		skb->data = eth;
		skb->len = len;

#ifdef WLMEDIA_HTSF
		dhd_htsf_addrxts(dhdp, pktbuf);
#endif
		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets and then discard them */
		memset(&event, 0, sizeof(event));
		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
			dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
			skb_mac_header(skb),
#else
			skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
			&event,
			&data);

			wl_event_to_host_order(&event);
			if (!tout_ctrl)
				tout_ctrl = DHD_PACKET_TIMEOUT_MS;

#if defined(PNO_SUPPORT)
			if (event.event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to garantee that Kernel not suspended */
				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
			}
#endif /* PNO_SUPPORT */

#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
#else
			PKTFREE(dhdp->osh, pktbuf, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		} else {
			tout_rx = DHD_PACKET_TIMEOUT_MS;

#ifdef PROP_TXSTATUS
			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
#endif /* PROP_TXSTATUS */
		}

		/* Re-fetch ifp: dhd_wl_host_event() receives &ifidx and may
		 * have changed the interface index.
		 */
		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
		ifp = dhd->iflist[ifidx];

		if (ifp->net)
			ifp->net->last_rx = jiffies;

		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
			dhdp->dstats.rx_bytes += skb->len;
			dhdp->rx_packets++; /* Local count */
			ifp->stats.rx_bytes += skb->len;
			ifp->stats.rx_packets++;
		}

		if (in_interrupt()) {
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			/* NOTE(review): netif_receive_skb() is only valid from
			 * softirq (not hardirq) context — confirm DHD_LB callers.
			 */
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif /* defined(DHD_LB) && defined(DHD_LB_RXP) */
			DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		} else {
			if (dhd->rxthread_enabled) {
				/* Chain the skbs; the whole batch is handed to
				 * the rx thread after the loop.
				 */
				if (!skbhead)
					skbhead = skb;
				else
					PKTSETNEXT(dhdp->osh, skbprev, skb);
				skbprev = skb;
			} else {

				/* If the receive is not processed inside an ISR,
				 * the softirqd must be woken explicitly to service
				 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
				 * by netif_rx_ni(), but in earlier kernels, we need
				 * to do it manually.
				 */
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);

#if defined(DHD_LB) && defined(DHD_LB_RXP)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_receive_skb(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx_ni(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
				ulong flags;
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#endif /* defined(DHD_LB) && defined(DHD_LB_RXP) */
			}
		}
	}

	if (dhd->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	/* Keep the host awake long enough to consume what was delivered. */
	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
	DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
}
4630
/* Per-OS event hook invoked by the common layer; the Linux port has no
 * additional work to do here, so this is an intentional no-op stub.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
}
4637
4638 void
4639 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4640 {
4641         dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4642         struct ether_header *eh;
4643         uint16 type;
4644
4645         dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4646
4647         eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4648         type  = ntoh16(eh->ether_type);
4649
4650         if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
4651                 atomic_dec(&dhd->pend_8021x_cnt);
4652
4653 #ifdef PROP_TXSTATUS
4654         if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4655                 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4656                 uint datalen  = PKTLEN(dhd->pub.osh, txp);
4657                 if (ifp != NULL) {
4658                         if (success) {
4659                                 dhd->pub.tx_packets++;
4660                                 ifp->stats.tx_packets++;
4661                                 ifp->stats.tx_bytes += datalen;
4662                         } else {
4663                                 ifp->stats.tx_dropped++;
4664                         }
4665                 }
4666         }
4667 #endif
4668 }
4669
4670 static struct net_device_stats *
4671 dhd_get_stats(struct net_device *net)
4672 {
4673         dhd_info_t *dhd = DHD_DEV_INFO(net);
4674         dhd_if_t *ifp;
4675         int ifidx;
4676
4677         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4678
4679         ifidx = dhd_net2idx(dhd, net);
4680         if (ifidx == DHD_BAD_IF) {
4681                 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4682
4683                 memset(&net->stats, 0, sizeof(net->stats));
4684                 return &net->stats;
4685         }
4686
4687         ifp = dhd->iflist[ifidx];
4688         ASSERT(dhd && ifp);
4689
4690         if (dhd->pub.up) {
4691                 /* Use the protocol to get dongle stats */
4692                 dhd_prot_dstats(&dhd->pub);
4693         }
4694         return &ifp->stats;
4695 }
4696
/*
 * Kernel thread that services the DHD watchdog when a positive
 * 'dhd_watchdog_prio' is configured: each timer tick releases 'tsk->sema'
 * (see dhd_watchdog()), and this thread runs the bus watchdog, bumps the
 * tick counter, and re-arms the timer while compensating for the time the
 * watchdog work itself consumed.  Exits via complete_and_exit() when the
 * task is terminated or the semaphore wait is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range. */
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
			SMP_RD_BARRIER_DEPENDS();
			/* NOTE(review): this break leaves the WD wake lock
			 * held on the terminate path — confirm intended.
			 */
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				dhd_bus_watchdog(&dhd->pub);

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
#ifdef DHD_L2_FILTER
				dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog */
				if (dhd->wd_timer_valid) {
					/* Period minus elapsed time; min() keeps
					 * the delay non-negative.
					 */
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		} else {
			/* Interrupted semaphore wait: exit the thread. */
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4753
/*
 * Watchdog timer callback.  If a dedicated watchdog thread exists it is
 * simply kicked via its semaphore and all work happens there; otherwise
 * the bus watchdog runs directly in timer context and the timer is
 * re-armed here.  No-ops while the dongle is in reset or the bus is
 * suspending.
 */
static void dhd_watchdog(ulong data)
{
	dhd_info_t *dhd = (dhd_info_t *)data;
	unsigned long flags;

	if (dhd->pub.dongle_reset) {
		return;
	}

	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
		return;
	}

	/* Delegate to dhd_watchdog_thread() when one is running. */
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}

	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

#ifdef DHD_L2_FILTER
	dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
4789
4790 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Kernel thread backing the PCIe runtime-PM timer: each timer tick
 * releases 'tsk->sema' (see dhd_runtimepm()), and this thread evaluates
 * the runtime-PM state machine and re-arms the timer, compensating for
 * the time the evaluation itself took.  Exits via complete_and_exit()
 * when terminated or when the semaphore wait is interrupted.
 */
static int
dhd_rpm_state_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				/* Only run the state machine once the interface is up. */
				if (dhd->pub.up) {
					dhd_runtimepm_state(&dhd->pub);
				}

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog */
				if (dhd->rpm_timer_valid) {
					/* Period minus elapsed time; min() keeps
					 * the delay non-negative.
					 */
					mod_timer(&dhd->rpm_timer,
						jiffies +
						msecs_to_jiffies(dhd_runtimepm_ms) -
						min(msecs_to_jiffies(dhd_runtimepm_ms),
							time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			/* Interrupted semaphore wait: exit the thread. */
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4834
4835 static void dhd_runtimepm(ulong data)
4836 {
4837         dhd_info_t *dhd = (dhd_info_t *)data;
4838
4839         if (dhd->pub.dongle_reset) {
4840                 return;
4841         }
4842
4843         if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4844                 up(&dhd->thr_rpm_ctl.sema);
4845                 return;
4846         }
4847 }
4848
/* Disable runtime PM: stop its timer (interval 0), then force the PCIe
 * bus awake, recording the caller's address for diagnostics.
 */
void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
	dhd_os_runtimepm_timer(dhdp, 0);
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
	DHD_ERROR(("DHD Runtime PM Disabled \n"));
}
4855
/*
 * Re-enable DHD runtime power management by restarting the runtime-PM
 * timer at the configured interval (dhd_runtimepm_ms).
 */
void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
{
	dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
	DHD_ERROR(("DHD Runtime PM Enabled \n"));
}
4861
4862 #endif /* DHD_PCIE_RUNTIMEPM */
4863
4864
4865 #ifdef ENABLE_ADAPTIVE_SCHED
4866 static void
4867 dhd_sched_policy(int prio)
4868 {
4869         struct sched_param param;
4870         if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
4871                 param.sched_priority = 0;
4872                 setScheduler(current, SCHED_NORMAL, &param);
4873         } else {
4874                 if (get_scheduler_policy(current) != SCHED_FIFO) {
4875                         param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
4876                         setScheduler(current, SCHED_FIFO, &param);
4877                 }
4878         }
4879 }
4880 #endif /* ENABLE_ADAPTIVE_SCHED */
4881 #ifdef DEBUG_CPU_FREQ
4882 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
4883 {
4884         dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
4885         struct cpufreq_freqs *freq = data;
4886         if (dhd) {
4887                 if (!dhd->new_freq)
4888                         goto exit;
4889                 if (val == CPUFREQ_POSTCHANGE) {
4890                         DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
4891                                 freq->new, freq->cpu));
4892                         *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
4893                 }
4894         }
4895 exit:
4896         return 0;
4897 }
4898 #endif /* DEBUG_CPU_FREQ */
4899 static int
4900 dhd_dpc_thread(void *data)
4901 {
4902         tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4903         dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4904
4905         /* This thread doesn't need any user-level access,
4906          * so get rid of all our resources
4907          */
4908         if (dhd_dpc_prio > 0)
4909         {
4910                 struct sched_param param;
4911                 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
4912                 setScheduler(current, SCHED_FIFO, &param);
4913         }
4914
4915 #ifdef CUSTOM_DPC_CPUCORE
4916         set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
4917 #else
4918         if (dhd->pub.conf->dpc_cpucore >= 0) {
4919                 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
4920                 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
4921         }
4922 #endif
4923 #ifdef CUSTOM_SET_CPUCORE
4924         dhd->pub.current_dpc = current;
4925 #endif /* CUSTOM_SET_CPUCORE */
4926         /* Run until signal received */
4927         while (1) {
4928                 if (!binary_sema_down(tsk)) {
4929 #ifdef ENABLE_ADAPTIVE_SCHED
4930                         dhd_sched_policy(dhd_dpc_prio);
4931 #endif /* ENABLE_ADAPTIVE_SCHED */
4932                         SMP_RD_BARRIER_DEPENDS();
4933                         if (tsk->terminated) {
4934                                 break;
4935                         }
4936
4937                         /* Call bus dpc unless it indicated down (then clean stop) */
4938                         if (dhd->pub.busstate != DHD_BUS_DOWN) {
4939 #ifdef DEBUG_DPC_THREAD_WATCHDOG
4940                                 int resched_cnt = 0;
4941 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
4942                                 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
4943                                 while (dhd_bus_dpc(dhd->pub.bus)) {
4944                                         /* process all data */
4945 #ifdef DEBUG_DPC_THREAD_WATCHDOG
4946                                         resched_cnt++;
4947                                         if (resched_cnt > MAX_RESCHED_CNT) {
4948                                                 DHD_INFO(("%s Calling msleep to"
4949                                                         "let other processes run. \n",
4950                                                         __FUNCTION__));
4951                                                 dhd->pub.dhd_bug_on = true;
4952                                                 resched_cnt = 0;
4953                                                 OSL_SLEEP(1);
4954                                         }
4955 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
4956                                 }
4957                                 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
4958                                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4959                         } else {
4960                                 if (dhd->pub.up)
4961                                         dhd_bus_stop(dhd->pub.bus, TRUE);
4962                                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4963                         }
4964                 } else {
4965                         break;
4966                 }
4967         }
4968         complete_and_exit(&tsk->completed, 0);
4969 }
4970
/*
 * RX-frame delivery kernel thread.  Drains packet chains queued by
 * dhd_sched_rxf() (via dhd_rxf_dequeue) and hands each packet to the
 * network stack.  Runs until tsk->terminated is set; exits through
 * complete_and_exit().  The wake lock released at the bottom of the loop
 * is the one taken by dhd_sched_rxf() when the packet was queued.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		/* Promote to real-time FIFO at the configured priority. */
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */

	/*  signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the chain, unlinking each packet before it
			 * goes up the stack.
			 */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				/* Pre-2.6: raise the RX softirq by hand. */
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically sleep 1ms so other tasks can run. */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			DHD_OS_WAKE_UNLOCK(pub);
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5051
5052 #ifdef BCMPCIE
/*
 * Re-enable DPC processing (counterpart of dhd_dpc_kill): re-initializes
 * the load-balancing pending-rx queue and re-enables any tasklets that
 * are currently in the disabled-once state (disable count == 1), so that
 * tasklet_enable balances the earlier tasklet_disable exactly.
 */
void dhd_dpc_enable(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (!dhdp || !dhdp->info)
		return;
	dhd = dhdp->info;

#ifdef DHD_LB
#ifdef DHD_LB_RXP
	/* Reset the pending-rx queue head; assumes the queue was already
	 * purged (see dhd_dpc_kill) — TODO confirm.
	 */
	__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_TXC
	/* count == 1 means "disabled exactly once": safe to enable. */
	if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
	if (atomic_read(&dhd->tasklet.count) ==  1)
		tasklet_enable(&dhd->tasklet);
}
5077 #endif /* BCMPCIE */
5078
5079
5080 #ifdef BCMPCIE
5081 void
5082 dhd_dpc_kill(dhd_pub_t *dhdp)
5083 {
5084         dhd_info_t *dhd;
5085
5086         if (!dhdp) {
5087                 return;
5088         }
5089
5090         dhd = dhdp->info;
5091
5092         if (!dhd) {
5093                 return;
5094         }
5095
5096         if (dhd->thr_dpc_ctl.thr_pid < 0) {
5097                 tasklet_disable(&dhd->tasklet);
5098                 tasklet_kill(&dhd->tasklet);
5099                 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5100         }
5101 #if defined(DHD_LB)
5102 #ifdef DHD_LB_RXP
5103         __skb_queue_purge(&dhd->rx_pend_queue);
5104 #endif /* DHD_LB_RXP */
5105         /* Kill the Load Balancing Tasklets */
5106 #if defined(DHD_LB_TXC)
5107         tasklet_disable(&dhd->tx_compl_tasklet);
5108         tasklet_kill(&dhd->tx_compl_tasklet);
5109 #endif /* DHD_LB_TXC */
5110 #if defined(DHD_LB_RXC)
5111         tasklet_disable(&dhd->rx_compl_tasklet);
5112         tasklet_kill(&dhd->rx_compl_tasklet);
5113 #endif /* DHD_LB_RXC */
5114 #endif /* DHD_LB */
5115 }
5116 #endif /* BCMPCIE */
5117
5118 static void
5119 dhd_dpc(ulong data)
5120 {
5121         dhd_info_t *dhd;
5122
5123         dhd = (dhd_info_t *)data;
5124
5125         /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5126          * down below , wake lock is set,
5127          * the tasklet is initialized in dhd_attach()
5128          */
5129         /* Call bus dpc unless it indicated down (then clean stop) */
5130         if (dhd->pub.busstate != DHD_BUS_DOWN) {
5131                 if (dhd_bus_dpc(dhd->pub.bus)) {
5132                         DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5133                         tasklet_schedule(&dhd->tasklet);
5134                 }
5135         } else {
5136                 dhd_bus_stop(dhd->pub.bus, TRUE);
5137         }
5138 }
5139
5140 void
5141 dhd_sched_dpc(dhd_pub_t *dhdp)
5142 {
5143         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5144
5145         if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5146                 DHD_OS_WAKE_LOCK(dhdp);
5147                 /* If the semaphore does not get up,
5148                 * wake unlock should be done here
5149                 */
5150                 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5151                         DHD_OS_WAKE_UNLOCK(dhdp);
5152                 }
5153                 return;
5154         } else {
5155                 tasklet_schedule(&dhd->tasklet);
5156         }
5157 }
5158
5159 static void
5160 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5161 {
5162         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5163 #ifdef RXF_DEQUEUE_ON_BUSY
5164         int ret = BCME_OK;
5165         int retry = 2;
5166 #endif /* RXF_DEQUEUE_ON_BUSY */
5167
5168         DHD_OS_WAKE_LOCK(dhdp);
5169
5170         DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5171 #ifdef RXF_DEQUEUE_ON_BUSY
5172         do {
5173                 ret = dhd_rxf_enqueue(dhdp, skb);
5174                 if (ret == BCME_OK || ret == BCME_ERROR)
5175                         break;
5176                 else
5177                         OSL_SLEEP(50); /* waiting for dequeueing */
5178         } while (retry-- > 0);
5179
5180         if (retry <= 0 && ret == BCME_BUSY) {
5181                 void *skbp = skb;
5182
5183                 while (skbp) {
5184                         void *skbnext = PKTNEXT(dhdp->osh, skbp);
5185                         PKTSETNEXT(dhdp->osh, skbp, NULL);
5186                         bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5187                                 __FUNCTION__, __LINE__);
5188                         netif_rx_ni(skbp);
5189                         skbp = skbnext;
5190                 }
5191                 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5192         } else {
5193                 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5194                         up(&dhd->thr_rxf_ctl.sema);
5195                 }
5196         }
5197 #else /* RXF_DEQUEUE_ON_BUSY */
5198         do {
5199                 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5200                         break;
5201         } while (1);
5202         if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5203                 up(&dhd->thr_rxf_ctl.sema);
5204         }
5205         return;
5206 #endif /* RXF_DEQUEUE_ON_BUSY */
5207 }
5208
5209 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5210 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5211
5212 #ifdef TOE
5213 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5214 static int
5215 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5216 {
5217         wl_ioctl_t ioc;
5218         char buf[32];
5219         int ret;
5220
5221         memset(&ioc, 0, sizeof(ioc));
5222
5223         ioc.cmd = WLC_GET_VAR;
5224         ioc.buf = buf;
5225         ioc.len = (uint)sizeof(buf);
5226         ioc.set = FALSE;
5227
5228         strncpy(buf, "toe_ol", sizeof(buf) - 1);
5229         buf[sizeof(buf) - 1] = '\0';
5230         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5231                 /* Check for older dongle image that doesn't support toe_ol */
5232                 if (ret == -EIO) {
5233                         DHD_ERROR(("%s: toe not supported by device\n",
5234                                 dhd_ifname(&dhd->pub, ifidx)));
5235                         return -EOPNOTSUPP;
5236                 }
5237
5238                 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5239                 return ret;
5240         }
5241
5242         memcpy(toe_ol, buf, sizeof(uint32));
5243         return 0;
5244 }
5245
5246 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5247 static int
5248 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5249 {
5250         wl_ioctl_t ioc;
5251         char buf[32];
5252         int toe, ret;
5253
5254         memset(&ioc, 0, sizeof(ioc));
5255
5256         ioc.cmd = WLC_SET_VAR;
5257         ioc.buf = buf;
5258         ioc.len = (uint)sizeof(buf);
5259         ioc.set = TRUE;
5260
5261         /* Set toe_ol as requested */
5262
5263         strncpy(buf, "toe_ol", sizeof(buf) - 1);
5264         buf[sizeof(buf) - 1] = '\0';
5265         memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
5266
5267         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5268                 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5269                         dhd_ifname(&dhd->pub, ifidx), ret));
5270                 return ret;
5271         }
5272
5273         /* Enable toe globally only if any components are enabled. */
5274
5275         toe = (toe_ol != 0);
5276
5277         strcpy(buf, "toe");
5278         memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
5279
5280         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5281                 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5282                 return ret;
5283         }
5284
5285         return 0;
5286 }
5287 #endif /* TOE */
5288
5289 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5290 void dhd_set_scb_probe(dhd_pub_t *dhd)
5291 {
5292         int ret = 0;
5293         wl_scb_probe_t scb_probe;
5294         char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5295
5296         memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
5297
5298         if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5299                 return;
5300         }
5301
5302         bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
5303
5304         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
5305                 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
5306         }
5307
5308         memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5309
5310         scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5311
5312         bcm_mkiovar("scb_probe", (char *)&scb_probe,
5313                 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
5314         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5315                 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5316                 return;
5317         }
5318 }
5319 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5320
5321 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
5322 static void
5323 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
5324 {
5325         dhd_info_t *dhd = DHD_DEV_INFO(net);
5326
5327         snprintf(info->driver, sizeof(info->driver), "wl");
5328         snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5329 }
5330
/* Minimal ethtool vtable: only drvinfo is exposed through this path;
 * other ethtool commands go through dhd_ethtool() via SIOCETHTOOL.
 * Non-static: referenced elsewhere in the driver during netdev setup.
 */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
5334 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5335
5336
5337 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Handler for SIOCETHTOOL ioctls arriving via dhd_ioctl_entry.
 * `uaddr` is a userspace pointer to an ethtool request that begins with
 * a 32-bit command word.  Supports ETHTOOL_GDRVINFO and, when TOE is
 * compiled in, get/set of RX/TX checksum offload.  Returns 0 or a
 * negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		/* Explicit termination: strncpy above may not terminate. */
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		/* NOTE(review): this unconditionally overwrites info.version,
		 * clobbering the EPI_VERSION_STR set in the "?dhd" branch
		 * above — confirm whether that is intended.
		 */
		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
5447 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5448
/*
 * Decide whether an ioctl/bus error indicates a hung dongle and, if so,
 * record a hang reason (unless one is already set) and send the HANG
 * event up to the framework.  Returns TRUE when a hang was reported,
 * FALSE otherwise.  A timeout error (-ETIMEDOUT/-EREMOTEIO) or a bus
 * that is down without an intentional dongle reset triggers the report.
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	/* Interface not up yet: errors are expected, not a hang. */
	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* DPC thread already gone: driver is unloading, don't report. */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif 

	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		/* Keep the first recorded reason; don't overwrite it. */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
5495
/*
 * Core ioctl dispatcher shared by the char-dev and netdev ioctl entry
 * points.  Routes DHD-private ioctls (DHD_IOCTL_MAGIC) to dhd_ioctl(),
 * optionally brings the bus up first (allow_delay_fwdl), serializes a
 * few security-sensitive commands against pending 802.1X frames, and
 * finally forwards everything else to the dongle via dhd_wl_ioctl().
 * Every exit path runs dhd_check_hang() on the result.  Returns a BCME_*
 * status code.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	/* Clamp the payload length to the driver's ioctl maximum. */
	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		if (allow_delay_fwdl) {
			/* Deferred firmware download: start the bus now. */
			int ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	/* Note: the strncmp lengths (9, 15) include the NUL, so these match
	 * the exact iovar name token at the start of data_buf.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/*  short cut wl ioctl calls here  */
		if (strcmp("htsf", data_buf) == 0) {
			/* NOTE(review): `dhd` is not declared in this function;
			 * this branch looks like it cannot compile when
			 * WLMEDIA_HTSF is enabled — verify.
			 */
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				/* Reset all latency-tracking state. */
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				/* NOTE(review): data_buf + 7 is one byte past the
				 * "tsport\0" string that strcmp just matched —
				 * confirm the caller places the value there.
				 */
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}

#ifdef DHD_DEBUG
	if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
		if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
			/* Print  IOVAR Information */
			DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
				__FUNCTION__, (char *)data_buf, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				/* Dump the value that follows the iovar name. */
				prhex(NULL, data_buf + strlen(data_buf) + 1,
					buflen - strlen(data_buf) - 1);
			}
		} else {
			/* Print  IOCTL Information */
			DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
				__FUNCTION__, ioc->cmd, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf, buflen);
			}
		}
	}
#endif /* DHD_DEBUG */

	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* Report a hang to the framework if the error pattern warrants it. */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
5642
/*
 * net_device ioctl entry point for DHD interfaces.
 *
 * Dispatches wireless-extensions, ethtool and Android private commands,
 * then handles SIOCDEVPRIVATE (wl/dhd iovar) requests: copies the ioctl
 * header and payload from user space, forwards it to dhd_ioctl_process(),
 * and copies any result back.
 *
 * Locking: runs with the OS wakelock and PERIM lock held; the PERIM lock
 * is dropped around copy_from_user/copy_to_user because those may sleep.
 * Returns an OSL_ERROR()-translated errno-style code.
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int ifidx;
	int ret;
	void *local_buf = NULL;		/* kernel-side copy of the user payload */
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		ret = BCME_NOTUP;
		goto exit;
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		ret = BCME_DONGLE_DOWN;
		goto exit;
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		goto exit;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		goto exit;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android private command channel (SIOCDEVPRIVATE+1) */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		goto exit;
	}

	if (cmd != SIOCDEVPRIVATE) {
		ret = -EOPNOTSUPP;
		goto exit;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
	/* 32-bit user space on a 64-bit kernel: translate the compat layout */
	if (is_compat_task()) {
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		ioc.buf = compat_ptr(compat_ioc.buf);
		ioc.len = compat_ioc.len;
		ioc.set = compat_ioc.set;
		ioc.used = compat_ioc.used;
		ioc.needed = compat_ioc.needed;
		/* To differentiate between wl and dhd read 4 more bytes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd read 4 more bytes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	}

	/* privilege check before any payload is accepted */
	if (!capable(CAP_NET_ADMIN)) {
		ret = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		/* clamp user-supplied length; +1 below for a forced terminator */
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			ret = BCME_NOMEM;
			goto done;
		}

		/* drop PERIM lock across the (possibly sleeping) user copy */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			ret = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		/* guarantee NUL termination for string-style iovar payloads */
		*(char *)(local_buf + buflen) = '\0';
	}

	ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	/* on success, return the (possibly updated) payload to user space */
	if (!ret && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			ret = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

exit:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(ret);
}
5787
5788
5789 #ifdef FIX_CPU_MIN_CLOCK
5790 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5791 {
5792         if (dhd) {
5793 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5794                 mutex_init(&dhd->cpufreq_fix);
5795 #endif
5796                 dhd->cpufreq_fix_status = FALSE;
5797         }
5798         return 0;
5799 }
5800
5801 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5802 {
5803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5804         mutex_lock(&dhd->cpufreq_fix);
5805 #endif
5806         if (dhd && !dhd->cpufreq_fix_status) {
5807                 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5808 #ifdef FIX_BUS_MIN_CLOCK
5809                 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5810 #endif /* FIX_BUS_MIN_CLOCK */
5811                 DHD_ERROR(("pm_qos_add_requests called\n"));
5812
5813                 dhd->cpufreq_fix_status = TRUE;
5814         }
5815 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5816         mutex_unlock(&dhd->cpufreq_fix);
5817 #endif
5818 }
5819
5820 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5821 {
5822 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5823         mutex_lock(&dhd ->cpufreq_fix);
5824 #endif
5825         if (dhd && dhd->cpufreq_fix_status != TRUE) {
5826 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5827                 mutex_unlock(&dhd->cpufreq_fix);
5828 #endif
5829                 return;
5830         }
5831
5832         pm_qos_remove_request(&dhd->dhd_cpu_qos);
5833 #ifdef FIX_BUS_MIN_CLOCK
5834         pm_qos_remove_request(&dhd->dhd_bus_qos);
5835 #endif /* FIX_BUS_MIN_CLOCK */
5836         DHD_ERROR(("pm_qos_add_requests called\n"));
5837
5838         dhd->cpufreq_fix_status = FALSE;
5839 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5840         mutex_unlock(&dhd->cpufreq_fix);
5841 #endif
5842 }
5843 #endif /* FIX_CPU_MIN_CLOCK */
5844
5845 #define MAX_TRY_CNT             5 /* Number of tries to disable deepsleep */
/*
 * dhd_deepsleep() - toggle firmware deepsleep mode.
 * @dhd:  driver info struct (dhd->pub is the dongle handle)
 * @flag: 1 = enter deepsleep (packet filters and MPC are disabled first),
 *        0 = leave deepsleep (retried up to MAX_TRY_CNT times, then MPC
 *            is re-enabled)
 *
 * Always returns 0, even if the underlying iovar calls fail; errors are
 * only logged.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
		case 1 :  /* Deepsleep on */
			DHD_ERROR(("dhd_deepsleep: ON\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC (power saving would fight deepsleep) */
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

			/* Enable Deepsleep */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("dhd_deepsleep: OFF\n"));

			/* Disable Deepsleep: set, then read back to verify the
			 * firmware actually left deepsleep; retry on failure.
			 */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				powervar = 0;
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0);

				/* query current deepsleep state */
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0)) < 0) {
					DHD_ERROR(("the error of dhd deepsleep status"
						" ret value :%d\n", ret));
				} else {
					/* 0 == deepsleep disabled: done */
					if (!(*(int *)iovbuf)) {
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC again now that deepsleep is off */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;
	}

	return 0;
}
5916
/*
 * dhd_stop() - net_device stop (ifconfig down) handler for a DHD interface.
 *
 * Tears down the interface: stops the TX queue, rolls back the cpufreq
 * fix, brings cfg80211 down and (for the built-in/no-fw-on-load case on
 * the primary interface) deletes leftover virtual interfaces, unregisters
 * address notifiers, stops the protocol module and powers Wi-Fi off.
 * Always returns 0 per netdev_ops convention.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	/* already down: skip straight to the power-off/cleanup tail */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	/* primary interface going down: full cfg80211 teardown */
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(NULL);

		ifp = dhd->iflist[0];
		ASSERT(ifp && ifp->net);
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev();
#endif /* WL_CFG80211_P2P_DEV_IF */

				/* remove every virtual IF (indices 1..max) */
				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}

#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
			cancel_work_sync(dhd->dhd_deferred_wq);
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* tear down NAPI state set up in dhd_open() */
		if (ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB && DHD_LB_RXP */

	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net, TRUE);
	else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

#ifdef BCMDBGFS
	dhd_dbg_remove();
#endif

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock (counterpart of the init done in dhd_open) */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	printf("%s: Exit\n", __FUNCTION__);

	return 0;
}
6058
6059 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6060 extern bool g_first_broadcast_scan;
6061 #endif 
6062
6063 #ifdef WL11U
6064 static int dhd_interworking_enable(dhd_pub_t *dhd)
6065 {
6066         char iovbuf[WLC_IOCTL_SMLEN];
6067         uint32 enable = true;
6068         int ret = BCME_OK;
6069
6070         bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
6071         ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6072         if (ret < 0) {
6073                 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
6074         }
6075
6076         if (ret == BCME_OK) {
6077                 /* basic capabilities for HS20 REL2 */
6078                 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
6079                 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
6080                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6081                 if (ret < 0) {
6082                         DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
6083                 }
6084         }
6085
6086         return ret;
6087 }
6088 #endif /* WL11u */
6089
/*
 * dhd_open() - net_device open (ifconfig up) handler for a DHD interface.
 *
 * For the primary interface (ifidx 0) this performs the full bring-up:
 * optional Android Wi-Fi power-on, cpufreq fix, bus start, optional
 * full-dongle aggregation setup, MAC address sync, TOE feature flags,
 * cfg80211 up, notifier registration and NAPI init. On any failure the
 * exit path calls dhd_stop() to unwind. Returns 0 on success, negative
 * on error.
 */
static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
#ifdef BCM_FD_AGGR
	char iovbuf[WLC_IOCTL_SMLEN];
	dbus_config_t config;
	uint32 agglimit = 0;
	uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
#endif /* BCM_FD_AGGR */
	int ifidx;
	int32 ret = 0;

	if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
		DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
		return -1;
	}

	printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	/* serialize against concurrent SDIO probe/open */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */
	/* Init wakelock */
	if (!dhd_download_fw_on_driverload &&
		!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_INIT(dhd);
		dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}

#ifdef PREVENT_REOPEN_DURING_HANG
	/* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
	if (dhd->pub.hang_was_sent == 1) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		/* Force to bring down WLAN interface in case dhd_stop() is not called
		 * from the upper layer when HANG event is triggered.
		 */
		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
			dhd_stop(net);
		} else {
			return -1;
		}
	}
#endif /* PREVENT_REOPEN_DURING_HANG */


	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	/* reset hang/trap bookkeeping for the fresh session */
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;
	dhd->pub.hang_reason = 0;
#ifdef DHD_LOSSLESS_ROAMING
	dhd->pub.dequeue_prec_map = ALLPRIO;
#endif
#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
	 *  This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	/* full bring-up happens only for the primary interface */
	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif 
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
#ifdef FIX_CPU_MIN_CLOCK
		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
			dhd_init_cpufreq_fix(dhd);
			dhd_fix_cpu_freq(dhd);
		}
#endif /* FIX_CPU_MIN_CLOCK */

		if (dhd->pub.busstate != DHD_BUS_DATA) {

			/* try to bring up bus */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}

		}
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

#ifdef BCM_FD_AGGR
		/* full-dongle aggregation: query dongle limits and configure
		 * the RPC transport accordingly
		 */
		config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;


		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
			iovbuf, sizeof(iovbuf));

		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
			agglimit = *(uint32 *)iovbuf;
			config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
			config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
			DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
				agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
			if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
				DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
			}
		} else {
			DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
			rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
		}

		/* Set aggregation for TX */
		bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
			rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);

		/* Set aggregation for RX */
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
			dhd->pub.info->fdaggr = 0;
			if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
			if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
		} else {
			DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
		}
#endif /* BCM_FD_AGGR */

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		} else {
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
		}
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef DHD_LB
			DHD_LB_STATS_INIT(&dhd->pub);
#ifdef DHD_LB_RXP
			__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#endif /* DHD_LB */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
#if defined(SET_RPS_CPUS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#else
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#endif 
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* register NAPI RX processing for load balancing (undone in dhd_stop) */
		if (dhd->rx_napi_netdev == NULL) {
			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
					dhd_napi_poll, dhd_napi_weight);
			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
					__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			napi_enable(&dhd->rx_napi_struct);
			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
			skb_queue_head_init(&dhd->rx_napi_queue);
		}
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(NUM_SCB_MAX_PROBE)
		dhd_set_scb_probe(&dhd->pub);
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

	OLD_MOD_INC_USE_COUNT;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

exit:
	/* any failure above unwinds through dhd_stop() */
	if (ret) {
		dhd_stop(net);
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
6354
6355 int dhd_do_driver_init(struct net_device *net)
6356 {
6357         dhd_info_t *dhd = NULL;
6358
6359         if (!net) {
6360                 DHD_ERROR(("Primary Interface not initialized \n"));
6361                 return -EINVAL;
6362         }
6363
6364 #ifdef MULTIPLE_SUPPLICANT
6365 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6366         if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6367                 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6368                 return 0;
6369         }
6370 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6371 #endif /* MULTIPLE_SUPPLICANT */
6372
6373         /*  && defined(OEM_ANDROID) && defined(BCMSDIO) */
6374         dhd = DHD_DEV_INFO(net);
6375
6376         /* If driver is already initialized, do nothing
6377          */
6378         if (dhd->pub.busstate == DHD_BUS_DATA) {
6379                 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6380                 return 0;
6381         }
6382
6383         if (dhd_open(net) < 0) {
6384                 DHD_ERROR(("Driver Init Failed \n"));
6385                 return -1;
6386         }
6387
6388         return 0;
6389 }
6390
6391 int
6392 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6393 {
6394
6395 #ifdef WL_CFG80211
6396         if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6397                 return BCME_OK;
6398 #endif
6399
6400         /* handle IF event caused by wl commands, SoftAP, WEXT and
6401          * anything else. This has to be done asynchronously otherwise
6402          * DPC will be blocked (and iovars will timeout as DPC has no chance
6403          * to read the response back)
6404          */
6405         if (ifevent->ifidx > 0) {
6406                 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6407                 if (if_event == NULL) {
6408                         DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6409                                 MALLOCED(dhdinfo->pub.osh)));
6410                         return BCME_NOMEM;
6411                 }
6412
6413                 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6414                 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6415                 strncpy(if_event->name, name, IFNAMSIZ);
6416                 if_event->name[IFNAMSIZ - 1] = '\0';
6417                 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6418                         DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6419         }
6420
6421         return BCME_OK;
6422 }
6423
6424 int
6425 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6426 {
6427         dhd_if_event_t *if_event;
6428
6429 #ifdef WL_CFG80211
6430         if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6431                 return BCME_OK;
6432 #endif /* WL_CFG80211 */
6433
6434         /* handle IF event caused by wl commands, SoftAP, WEXT and
6435          * anything else
6436          */
6437         if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6438         if (if_event == NULL) {
6439                 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6440                         MALLOCED(dhdinfo->pub.osh)));
6441                 return BCME_NOMEM;
6442         }
6443         memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6444         memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6445         strncpy(if_event->name, name, IFNAMSIZ);
6446         if_event->name[IFNAMSIZ - 1] = '\0';
6447         dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6448                 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6449
6450         return BCME_OK;
6451 }
6452
6453 /* unregister and free the existing net_device interface (if any) in iflist and
6454  * allocate a new one. the slot is reused. this function does NOT register the
6455  * new interface to linux kernel. dhd_register_if does the job
6456  */
6457 struct net_device*
6458 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6459         uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6460 {
6461         dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6462         dhd_if_t *ifp;
6463
6464         ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6465         ifp = dhdinfo->iflist[ifidx];
6466
6467         if (ifp != NULL) {
6468                 if (ifp->net != NULL) {
6469                         DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6470
6471                         dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6472
6473                         /* in unregister_netdev case, the interface gets freed by net->destructor
6474                          * (which is set to free_netdev)
6475                          */
6476                         if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6477                                 free_netdev(ifp->net);
6478                         } else {
6479                                 netif_stop_queue(ifp->net);
6480                                 if (need_rtnl_lock)
6481                                         unregister_netdev(ifp->net);
6482                                 else
6483                                         unregister_netdevice(ifp->net);
6484                         }
6485                         ifp->net = NULL;
6486                 }
6487         } else {
6488                 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6489                 if (ifp == NULL) {
6490                         DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6491                         return NULL;
6492                 }
6493         }
6494
6495         memset(ifp, 0, sizeof(dhd_if_t));
6496         ifp->info = dhdinfo;
6497         ifp->idx = ifidx;
6498         ifp->bssidx = bssidx;
6499         if (mac != NULL)
6500                 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6501
6502         /* Allocate etherdev, including space for private structure */
6503         ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6504         if (ifp->net == NULL) {
6505                 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6506                 goto fail;
6507         }
6508
6509         /* Setup the dhd interface's netdevice private structure. */
6510         dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6511
6512         if (name && name[0]) {
6513                 strncpy(ifp->net->name, name, IFNAMSIZ);
6514                 ifp->net->name[IFNAMSIZ - 1] = '\0';
6515         }
6516
6517 #ifdef WL_CFG80211
6518         if (ifidx == 0)
6519                 ifp->net->destructor = free_netdev;
6520         else
6521                 ifp->net->destructor = dhd_netdev_free;
6522 #else
6523         ifp->net->destructor = free_netdev;
6524 #endif /* WL_CFG80211 */
6525         strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6526         ifp->name[IFNAMSIZ - 1] = '\0';
6527         dhdinfo->iflist[ifidx] = ifp;
6528
6529 /* initialize the dongle provided if name */
6530         if (dngl_name)
6531                 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6532         else
6533                 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6534
6535 #ifdef PCIE_FULL_DONGLE
6536         /* Initialize STA info list */
6537         INIT_LIST_HEAD(&ifp->sta_list);
6538         DHD_IF_STA_LIST_LOCK_INIT(ifp);
6539 #endif /* PCIE_FULL_DONGLE */
6540
6541 #ifdef DHD_L2_FILTER
6542         ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6543         ifp->parp_allnode = TRUE;
6544 #endif
6545         return ifp->net;
6546
6547 fail:
6548
6549         if (ifp != NULL) {
6550                 if (ifp->net != NULL) {
6551                         dhd_dev_priv_clear(ifp->net);
6552                         free_netdev(ifp->net);
6553                         ifp->net = NULL;
6554                 }
6555                 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6556                 ifp = NULL;
6557         }
6558
6559         dhdinfo->iflist[ifidx] = NULL;
6560         return NULL;
6561 }
6562
6563 /* unregister and free the the net_device interface associated with the indexed
6564  * slot, also free the slot memory and set the slot pointer to NULL
6565  */
6566 int
6567 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6568 {
6569         dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6570         dhd_if_t *ifp;
6571
6572         ifp = dhdinfo->iflist[ifidx];
6573
6574         if (ifp != NULL) {
6575                 if (ifp->net != NULL) {
6576                         DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6577
6578                         /* in unregister_netdev case, the interface gets freed by net->destructor
6579                          * (which is set to free_netdev)
6580                          */
6581                         if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6582                                 free_netdev(ifp->net);
6583                         } else {
6584                                 netif_tx_disable(ifp->net);
6585
6586
6587
6588 #if defined(SET_RPS_CPUS)
6589                                 custom_rps_map_clear(ifp->net->_rx);
6590 #endif /* SET_RPS_CPUS */
6591 #if defined(SET_RPS_CPUS)
6592 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6593                                 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6594 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6595 #endif 
6596                                 if (need_rtnl_lock)
6597                                         unregister_netdev(ifp->net);
6598                                 else
6599                                         unregister_netdevice(ifp->net);
6600                         }
6601                         ifp->net = NULL;
6602                         dhdinfo->iflist[ifidx] = NULL;
6603                 }
6604 #ifdef DHD_WMF
6605                 dhd_wmf_cleanup(dhdpub, ifidx);
6606 #endif /* DHD_WMF */
6607 #ifdef DHD_L2_FILTER
6608                 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6609                         NULL, FALSE, dhdpub->tickcnt);
6610                 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6611                 ifp->phnd_arp_table = NULL;
6612 #endif /* DHD_L2_FILTER */
6613
6614                 dhd_if_del_sta_list(ifp);
6615
6616                 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6617
6618         }
6619
6620         return BCME_OK;
6621 }
6622
6623 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (ifidx 0).
 * Not const: fields may be patched at runtime elsewhere in this driver.
 */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	/* the multicast-list hook was renamed to ndo_set_rx_mode in 3.2 */
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
6637
/* net_device_ops for virtual (non-primary) interfaces.
 * Note: no .ndo_open/.ndo_stop here -- virtual interfaces are brought
 * up/down through the primary interface's lifecycle.
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
6649 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6650
6651 #ifdef DEBUGGER
6652 extern void debugger_init(void *bus_handle);
6653 #endif
6654
6655
6656 #ifdef SHOW_LOGTRACE
/* Default locations of the firmware symbol/string files used to decode
 * event-log traces; each is overridable via the module_param()s below.
 */
static char *logstrs_path = "/root/logstrs.bin";
static char *st_str_file_path = "/root/rtecdc.bin";
static char *map_file_path = "/root/rtecdc.map";
static char *rom_st_str_file_path = "/root/roml.bin";
static char *rom_map_file_path = "/root/roml.map";

#define BYTES_AHEAD_NUM         11      /* address in map file is before these many bytes */
#define READ_NUM_BYTES          1000 /* read map file each time this No. of bytes */
#define GO_BACK_FILE_POS_NUM_BYTES      100 /* set file pos back to cur pos */
/* marker strings searched for in the .map file to locate section addresses */
static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
/* substrings used to tell RAM image files from ROM image files by name */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";
/* bitmask tracking which of the three map addresses have been found */
#define RAMSTART_BIT    0x01
#define RDSTART_BIT             0x02
#define RDEND_BIT               0x04
#define ALL_MAP_VAL             (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)

/* expose the file paths as read-only module parameters */
module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);
6681
6682 static void
6683 dhd_init_logstrs_array(dhd_event_log_t *temp)
6684 {
6685         struct file *filep = NULL;
6686         struct kstat stat;
6687         mm_segment_t fs;
6688         char *raw_fmts =  NULL;
6689         int logstrs_size = 0;
6690
6691         logstr_header_t *hdr = NULL;
6692         uint32 *lognums = NULL;
6693         char *logstrs = NULL;
6694         int ram_index = 0;
6695         char **fmts;
6696         int num_fmts = 0;
6697         uint32 i = 0;
6698         int error = 0;
6699
6700         fs = get_fs();
6701         set_fs(KERNEL_DS);
6702
6703         filep = filp_open(logstrs_path, O_RDONLY, 0);
6704
6705         if (IS_ERR(filep)) {
6706                 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
6707                 goto fail;
6708         }
6709         error = vfs_stat(logstrs_path, &stat);
6710         if (error) {
6711                 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
6712                 goto fail;
6713         }
6714         logstrs_size = (int) stat.size;
6715
6716         raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6717         if (raw_fmts == NULL) {
6718                 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
6719                 goto fail;
6720         }
6721         if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) !=   logstrs_size) {
6722                 DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
6723                 goto fail;
6724         }
6725
6726         /* Remember header from the logstrs.bin file */
6727         hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
6728                 sizeof(logstr_header_t));
6729
6730         if (hdr->log_magic == LOGSTRS_MAGIC) {
6731                 /*
6732                 * logstrs.bin start with header.
6733                 */
6734                 num_fmts =      hdr->rom_logstrs_offset / sizeof(uint32);
6735                 ram_index = (hdr->ram_lognums_offset -
6736                         hdr->rom_lognums_offset) / sizeof(uint32);
6737                 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
6738                 logstrs = (char *)       &raw_fmts[hdr->rom_logstrs_offset];
6739         } else {
6740                 /*
6741                  * Legacy logstrs.bin format without header.
6742                  */
6743                 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
6744                 if (num_fmts == 0) {
6745                         /* Legacy ROM/RAM logstrs.bin format:
6746                           *  - ROM 'lognums' section
6747                           *   - RAM 'lognums' section
6748                           *   - ROM 'logstrs' section.
6749                           *   - RAM 'logstrs' section.
6750                           *
6751                           * 'lognums' is an array of indexes for the strings in the
6752                           * 'logstrs' section. The first uint32 is 0 (index of first
6753                           * string in ROM 'logstrs' section).
6754                           *
6755                           * The 4324b5 is the only ROM that uses this legacy format. Use the
6756                           * fixed number of ROM fmtnums to find the start of the RAM
6757                           * 'lognums' section. Use the fixed first ROM string ("Con\n") to
6758                           * find the ROM 'logstrs' section.
6759                           */
6760                         #define NUM_4324B5_ROM_FMTS     186
6761                         #define FIRST_4324B5_ROM_LOGSTR "Con\n"
6762                         ram_index = NUM_4324B5_ROM_FMTS;
6763                         lognums = (uint32 *) raw_fmts;
6764                         num_fmts =      ram_index;
6765                         logstrs = (char *) &raw_fmts[num_fmts << 2];
6766                         while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
6767                                 num_fmts++;
6768                                 logstrs = (char *) &raw_fmts[num_fmts << 2];
6769                         }
6770                 } else {
6771                                 /* Legacy RAM-only logstrs.bin format:
6772                                  *        - RAM 'lognums' section
6773                                  *        - RAM 'logstrs' section.
6774                                  *
6775                                  * 'lognums' is an array of indexes for the strings in the
6776                                  * 'logstrs' section. The first uint32 is an index to the
6777                                  * start of 'logstrs'. Therefore, if this index is divided
6778                                  * by 'sizeof(uint32)' it provides the number of logstr
6779                                  *      entries.
6780                                  */
6781                                 ram_index = 0;
6782                                 lognums = (uint32 *) raw_fmts;
6783                                 logstrs = (char *)      &raw_fmts[num_fmts << 2];
6784                         }
6785         }
6786         fmts = kmalloc(num_fmts  * sizeof(char *), GFP_KERNEL);
6787         if (fmts == NULL) {
6788                 DHD_ERROR(("Failed to allocate fmts memory\n"));
6789                 goto fail;
6790         }
6791
6792         for (i = 0; i < num_fmts; i++) {
6793                 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6794                 * (they are 0-indexed relative to 'rom_logstrs_offset').
6795                 *
6796                 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6797                 * are 0-indexed relative to the start of the logstrs.bin file).
6798                 */
6799                 if (i == ram_index) {
6800                         logstrs = raw_fmts;
6801                 }
6802                 fmts[i] = &logstrs[lognums[i]];
6803         }
6804         temp->fmts = fmts;
6805         temp->raw_fmts = raw_fmts;
6806         temp->num_fmts = num_fmts;
6807         filp_close(filep, NULL);
6808         set_fs(fs);
6809         return;
6810 fail:
6811         if (raw_fmts) {
6812                 kfree(raw_fmts);
6813                 raw_fmts = NULL;
6814         }
6815         if (!IS_ERR(filep))
6816                 filp_close(filep, NULL);
6817         set_fs(fs);
6818         temp->fmts = NULL;
6819         return;
6820 }
6821
6822 static int
6823 dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
6824         uint32 *rodata_end)
6825 {
6826         struct file *filep = NULL;
6827         mm_segment_t fs;
6828         char *raw_fmts =  NULL;
6829         uint32 read_size = READ_NUM_BYTES;
6830         int error = 0;
6831         char * cptr = NULL;
6832         char c;
6833         uint8 count = 0;
6834
6835         *ramstart = 0;
6836         *rodata_start = 0;
6837         *rodata_end = 0;
6838
6839         if (fname == NULL) {
6840                 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
6841                 return BCME_ERROR;
6842         }
6843
6844         fs = get_fs();
6845         set_fs(KERNEL_DS);
6846
6847         filep = filp_open(fname, O_RDONLY, 0);
6848         if (IS_ERR(filep)) {
6849                 DHD_ERROR(("%s: Failed to open %s \n",  __FUNCTION__, fname));
6850                 goto fail;
6851         }
6852
6853         /* Allocate 1 byte more than read_size to terminate it with NULL */
6854         raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
6855         if (raw_fmts == NULL) {
6856                 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6857                 goto fail;
6858         }
6859
6860         /* read ram start, rodata_start and rodata_end values from map  file */
6861
6862         while (count != ALL_MAP_VAL)
6863         {
6864                 error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
6865                 if (error < 0) {
6866                         DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
6867                                 map_file_path, error));
6868                         goto fail;
6869                 }
6870
6871                 if (error < read_size) {
6872                         /*
6873                         * since we reset file pos back to earlier pos by
6874                         * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
6875                         * So if ret value is less than read_size, reached EOF don't read further
6876                         */
6877                         break;
6878                 }
6879                 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6880                 raw_fmts[read_size] = '\0';
6881
6882                 /* Get ramstart address */
6883                 if ((cptr = strstr(raw_fmts, ramstart_str))) {
6884                         cptr = cptr - BYTES_AHEAD_NUM;
6885                         sscanf(cptr, "%x %c text_start", ramstart, &c);
6886                         count |= RAMSTART_BIT;
6887                 }
6888
6889                 /* Get ram rodata start address */
6890                 if ((cptr = strstr(raw_fmts, rodata_start_str))) {
6891                         cptr = cptr - BYTES_AHEAD_NUM;
6892                         sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6893                         count |= RDSTART_BIT;
6894                 }
6895
6896                 /* Get ram rodata end address */
6897                 if ((cptr = strstr(raw_fmts, rodata_end_str))) {
6898                         cptr = cptr - BYTES_AHEAD_NUM;
6899                         sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6900                         count |= RDEND_BIT;
6901                 }
6902                 memset(raw_fmts, 0, read_size);
6903                 /*
6904                 * go back to predefined NUM of bytes so that we won't miss
6905                 * the string and  addr even if it comes as splited in next read.
6906                 */
6907                 filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
6908         }
6909
6910         DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6911                 *ramstart, *rodata_start, *rodata_end));
6912
6913         DHD_ERROR(("readmap over \n"));
6914
6915 fail:
6916         if (raw_fmts) {
6917                 kfree(raw_fmts);
6918                 raw_fmts = NULL;
6919         }
6920         if (!IS_ERR(filep))
6921                 filp_close(filep, NULL);
6922
6923         set_fs(fs);
6924         if (count == ALL_MAP_VAL) {
6925                 return BCME_OK;
6926         }
6927         DHD_ERROR(("readmap error 0X%x \n", count));
6928         return BCME_ERROR;
6929 }
6930
6931 static void
6932 dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
6933 {
6934         struct file *filep = NULL;
6935         mm_segment_t fs;
6936         char *raw_fmts =  NULL;
6937         uint32 logstrs_size = 0;
6938
6939         int error = 0;
6940         uint32 ramstart = 0;
6941         uint32 rodata_start = 0;
6942         uint32 rodata_end = 0;
6943         uint32 logfilebase = 0;
6944
6945         error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
6946         if (error == BCME_ERROR) {
6947                 DHD_ERROR(("readmap Error!! \n"));
6948                 /* don't do event log parsing in actual case */
6949                 temp->raw_sstr = NULL;
6950                 return;
6951         }
6952         DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6953                 ramstart, rodata_start, rodata_end));
6954
6955         fs = get_fs();
6956         set_fs(KERNEL_DS);
6957
6958         filep = filp_open(str_file, O_RDONLY, 0);
6959         if (IS_ERR(filep)) {
6960                 DHD_ERROR(("%s: Failed to open the file %s \n",  __FUNCTION__, str_file));
6961                 goto fail;
6962         }
6963
6964         /* Full file size is huge. Just read required part */
6965         logstrs_size = rodata_end - rodata_start;
6966
6967         raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6968         if (raw_fmts == NULL) {
6969                 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6970                 goto fail;
6971         }
6972
6973         logfilebase = rodata_start - ramstart;
6974
6975         error = generic_file_llseek(filep, logfilebase, SEEK_SET);
6976         if (error < 0) {
6977                 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
6978                 goto fail;
6979         }
6980
6981         error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
6982         if (error != logstrs_size) {
6983                 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
6984                 goto fail;
6985         }
6986
6987         if (strstr(str_file, ram_file_str) != NULL) {
6988                 temp->raw_sstr = raw_fmts;
6989                 temp->ramstart = ramstart;
6990                 temp->rodata_start = rodata_start;
6991                 temp->rodata_end = rodata_end;
6992         } else if (strstr(str_file, rom_file_str) != NULL) {
6993                 temp->rom_raw_sstr = raw_fmts;
6994                 temp->rom_ramstart = ramstart;
6995                 temp->rom_rodata_start = rodata_start;
6996                 temp->rom_rodata_end = rodata_end;
6997         }
6998
6999         filp_close(filep, NULL);
7000         set_fs(fs);
7001
7002         return;
7003 fail:
7004         if (raw_fmts) {
7005                 kfree(raw_fmts);
7006                 raw_fmts = NULL;
7007         }
7008         if (!IS_ERR(filep))
7009                 filp_close(filep, NULL);
7010         set_fs(fs);
7011         if (strstr(str_file, ram_file_str) != NULL) {
7012                 temp->raw_sstr = NULL;
7013         } else if (strstr(str_file, rom_file_str) != NULL) {
7014                 temp->rom_raw_sstr = NULL;
7015         }
7016         return;
7017 }
7018
7019 #endif /* SHOW_LOGTRACE */
7020
7021
7022 dhd_pub_t *
7023 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7024 {
7025         dhd_info_t *dhd = NULL;
7026         struct net_device *net = NULL;
7027         char if_name[IFNAMSIZ] = {'\0'};
7028         uint32 bus_type = -1;
7029         uint32 bus_num = -1;
7030         uint32 slot_num = -1;
7031         wifi_adapter_info_t *adapter = NULL;
7032
7033         dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7034         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7035
7036 #ifdef STBLINUX
7037         DHD_ERROR(("%s\n", driver_target));
7038 #endif /* STBLINUX */
7039         /* will implement get_ids for DBUS later */
7040 #if defined(BCMSDIO)
7041         dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7042 #endif 
7043         adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7044
7045         /* Allocate primary dhd_info */
7046         dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7047         if (dhd == NULL) {
7048                 dhd = MALLOC(osh, sizeof(dhd_info_t));
7049                 if (dhd == NULL) {
7050                         DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7051                         goto fail;
7052                 }
7053         }
7054         memset(dhd, 0, sizeof(dhd_info_t));
7055         dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7056
7057         dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7058
7059         dhd->pub.osh = osh;
7060         dhd->adapter = adapter;
7061
7062 #ifdef GET_CUSTOM_MAC_ENABLE
7063         wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7064 #endif /* GET_CUSTOM_MAC_ENABLE */
7065 #ifdef CUSTOM_FORCE_NODFS_FLAG
7066         dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7067         dhd->pub.force_country_change = TRUE;
7068 #endif /* CUSTOM_FORCE_NODFS_FLAG */
7069 #ifdef CUSTOM_COUNTRY_CODE
7070         get_customized_country_code(dhd->adapter,
7071                 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7072                 dhd->pub.dhd_cflags);
7073 #endif /* CUSTOM_COUNTRY_CODE */
7074         dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7075         dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7076
7077         /* Initialize thread based operation and lock */
7078         sema_init(&dhd->sdsem, 1);
7079
7080         /* Link to info module */
7081         dhd->pub.info = dhd;
7082
7083
7084         /* Link to bus module */
7085         dhd->pub.bus = bus;
7086         dhd->pub.hdrlen = bus_hdrlen;
7087
7088         /* dhd_conf must be attached after linking dhd to dhd->pub.info,
7089          * because dhd_detech will check .info is NULL or not.
7090         */
7091         if (dhd_conf_attach(&dhd->pub) != 0) {
7092                 DHD_ERROR(("dhd_conf_attach failed\n"));
7093                 goto fail;
7094         }
7095         dhd_conf_reset(&dhd->pub);
7096         dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7097         dhd_conf_preinit(&dhd->pub);
7098
7099         /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
7100          * This is indeed a hack but we have to make it work properly before we have a better
7101          * solution
7102          */
7103         dhd_update_fw_nv_path(dhd);
7104 #ifndef BUILD_IN_KERNEL
7105         dhd_conf_read_config(&dhd->pub, dhd->conf_path);
7106 #endif
7107
7108         /* Set network interface name if it was provided as module parameter */
7109         if (iface_name[0]) {
7110                 int len;
7111                 char ch;
7112                 strncpy(if_name, iface_name, IFNAMSIZ);
7113                 if_name[IFNAMSIZ - 1] = 0;
7114                 len = strlen(if_name);
7115                 ch = if_name[len - 1];
7116                 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7117                         strcat(if_name, "%d");
7118         }
7119
7120         /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
7121         net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7122         if (net == NULL) {
7123                 goto fail;
7124         }
7125
7126
7127         dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7128 #ifdef DHD_L2_FILTER
7129         /* initialize the l2_filter_cnt */
7130         dhd->pub.l2_filter_cnt = 0;
7131 #endif
7132 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7133         net->open = NULL;
7134 #else
7135         net->netdev_ops = NULL;
7136 #endif
7137
7138         mutex_init(&dhd->dhd_iovar_mutex);
7139         sema_init(&dhd->proto_sem, 1);
7140
7141 #ifdef PROP_TXSTATUS
7142         spin_lock_init(&dhd->wlfc_spinlock);
7143
7144         dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7145         dhd->pub.plat_init = dhd_wlfc_plat_init;
7146         dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7147
7148 #ifdef DHD_WLFC_THREAD
7149         init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7150         dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7151         if (IS_ERR(dhd->pub.wlfc_thread)) {
7152                 DHD_ERROR(("create wlfc thread failed\n"));
7153                 goto fail;
7154         } else {
7155                 wake_up_process(dhd->pub.wlfc_thread);
7156         }
7157 #endif /* DHD_WLFC_THREAD */
7158 #endif /* PROP_TXSTATUS */
7159
7160         /* Initialize other structure content */
7161         init_waitqueue_head(&dhd->ioctl_resp_wait);
7162         init_waitqueue_head(&dhd->d3ack_wait);
7163         init_waitqueue_head(&dhd->ctrl_wait);
7164         init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7165         dhd->pub.dhd_bus_busy_state = 0;
7166
7167         /* Initialize the spinlocks */
7168         spin_lock_init(&dhd->sdlock);
7169         spin_lock_init(&dhd->txqlock);
7170         spin_lock_init(&dhd->dhd_lock);
7171         spin_lock_init(&dhd->rxf_lock);
7172 #if defined(RXFRAME_THREAD)
7173         dhd->rxthread_enabled = TRUE;
7174 #endif /* defined(RXFRAME_THREAD) */
7175
7176 #ifdef DHDTCPACK_SUPPRESS
7177         spin_lock_init(&dhd->tcpack_lock);
7178 #endif /* DHDTCPACK_SUPPRESS */
7179
7180         /* Initialize Wakelock stuff */
7181         spin_lock_init(&dhd->wakelock_spinlock);
7182         spin_lock_init(&dhd->wakelock_evt_spinlock);
7183         DHD_OS_WAKE_LOCK_INIT(dhd);
7184         dhd->wakelock_wd_counter = 0;
7185 #ifdef CONFIG_HAS_WAKELOCK
7186         wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7187 #endif /* CONFIG_HAS_WAKELOCK */
7188
7189 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7190         mutex_init(&dhd->dhd_net_if_mutex);
7191         mutex_init(&dhd->dhd_suspend_mutex);
7192 #endif
7193         dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7194
7195         /* Attach and link in the protocol */
7196         if (dhd_prot_attach(&dhd->pub) != 0) {
7197                 DHD_ERROR(("dhd_prot_attach failed\n"));
7198                 goto fail;
7199         }
7200         dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7201
7202 #ifdef WL_CFG80211
7203         /* Attach and link in the cfg80211 */
7204         if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7205                 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7206                 goto fail;
7207         }
7208
7209         dhd_monitor_init(&dhd->pub);
7210         dhd_state |= DHD_ATTACH_STATE_CFG80211;
7211 #endif
7212 #ifdef DHD_LOG_DUMP
7213         dhd_log_dump_init(&dhd->pub);
7214 #endif /* DHD_LOG_DUMP */
7215 #if defined(WL_WIRELESS_EXT)
7216         /* Attach and link in the iw */
7217         if (!(dhd_state &  DHD_ATTACH_STATE_CFG80211)) {
7218                 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7219                         DHD_ERROR(("wl_iw_attach failed\n"));
7220                         goto fail;
7221                 }
7222                 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7223         }
7224 #endif /* defined(WL_WIRELESS_EXT) */
7225
7226 #ifdef SHOW_LOGTRACE
7227         dhd_init_logstrs_array(&dhd->event_data);
7228         dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7229         dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7230 #endif /* SHOW_LOGTRACE */
7231
7232         if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7233                 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7234                 goto fail;
7235         }
7236
7237
7238
7239         /* Set up the watchdog timer */
7240         init_timer(&dhd->timer);
7241         dhd->timer.data = (ulong)dhd;
7242         dhd->timer.function = dhd_watchdog;
7243         dhd->default_wd_interval = dhd_watchdog_ms;
7244
7245         if (dhd_watchdog_prio >= 0) {
7246                 /* Initialize watchdog thread */
7247                 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7248                 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7249                         goto fail;
7250                 }
7251
7252         } else {
7253                 dhd->thr_wdt_ctl.thr_pid = -1;
7254         }
7255
7256 #ifdef DHD_PCIE_RUNTIMEPM
7257         /* Setup up the runtime PM Idlecount timer */
7258         init_timer(&dhd->rpm_timer);
7259         dhd->rpm_timer.data = (ulong)dhd;
7260         dhd->rpm_timer.function = dhd_runtimepm;
7261         dhd->rpm_timer_valid = FALSE;
7262
7263         dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7264         PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7265         if (dhd->thr_rpm_ctl.thr_pid < 0) {
7266                 goto fail;
7267         }
7268 #endif /* DHD_PCIE_RUNTIMEPM */
7269
7270 #ifdef DEBUGGER
7271         debugger_init((void *) bus);
7272 #endif
7273
7274         /* Set up the bottom half handler */
7275         if (dhd_dpc_prio >= 0) {
7276                 /* Initialize DPC thread */
7277                 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7278                 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7279                         goto fail;
7280                 }
7281         } else {
7282                 /*  use tasklet for dpc */
7283                 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7284                 dhd->thr_dpc_ctl.thr_pid = -1;
7285         }
7286
7287         if (dhd->rxthread_enabled) {
7288                 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7289                 /* Initialize RXF thread */
7290                 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7291                 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7292                         goto fail;
7293                 }
7294         }
7295
7296         dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7297
7298 #if defined(CONFIG_PM_SLEEP)
7299         if (!dhd_pm_notifier_registered) {
7300                 dhd_pm_notifier_registered = TRUE;
7301                 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7302                 dhd->pm_notifier.priority = 10;
7303                 register_pm_notifier(&dhd->pm_notifier);
7304         }
7305
7306 #endif /* CONFIG_PM_SLEEP */
7307
7308 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7309         dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7310         dhd->early_suspend.suspend = dhd_early_suspend;
7311         dhd->early_suspend.resume = dhd_late_resume;
7312         register_early_suspend(&dhd->early_suspend);
7313         dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7314 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7315
7316 #ifdef ARP_OFFLOAD_SUPPORT
7317         dhd->pend_ipaddr = 0;
7318         if (!dhd_inetaddr_notifier_registered) {
7319                 dhd_inetaddr_notifier_registered = TRUE;
7320                 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7321         }
7322 #endif /* ARP_OFFLOAD_SUPPORT */
7323
7324 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7325         if (!dhd_inet6addr_notifier_registered) {
7326                 dhd_inet6addr_notifier_registered = TRUE;
7327                 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7328         }
7329 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7330         dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7331 #ifdef DEBUG_CPU_FREQ
7332         dhd->new_freq = alloc_percpu(int);
7333         dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7334         cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7335 #endif
7336 #ifdef DHDTCPACK_SUPPRESS
7337 #ifdef BCMSDIO
7338         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7339 #elif defined(BCMPCIE)
7340         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7341 #else
7342         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7343 #endif /* BCMSDIO */
7344 #endif /* DHDTCPACK_SUPPRESS */
7345
7346 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7347 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7348
7349         dhd_state |= DHD_ATTACH_STATE_DONE;
7350         dhd->dhd_state = dhd_state;
7351
7352         dhd_found++;
7353 #ifdef DHD_DEBUG_PAGEALLOC
7354         register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7355 #endif /* DHD_DEBUG_PAGEALLOC */
7356
7357 #if defined(DHD_LB)
7358         DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7359
7360         dhd_lb_set_default_cpus(dhd);
7361
7362         /* Initialize the CPU Masks */
7363         if (dhd_cpumasks_init(dhd) ==  0) {
7364
7365                 /* Now we have the current CPU maps, run through candidacy */
7366                 dhd_select_cpu_candidacy(dhd);
7367
7368                 /*
7369                  * If we are able to initialize CPU masks, lets register to the
7370                  * CPU Hotplug framework to change the CPU for each job dynamically
7371                  * using candidacy algorithm.
7372                  */
7373                 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7374                 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7375         } else {
7376                 /*
7377                  * We are unable to initialize CPU masks, so candidacy algorithm
7378                  * won't run, but still Load Balancing will be honoured based
7379                  * on the CPUs allocated for a given job statically during init
7380                  */
7381                 dhd->cpu_notifier.notifier_call = NULL;
7382                 DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n",
7383                         __FUNCTION__));
7384         }
7385
7386
7387         DHD_LB_STATS_INIT(&dhd->pub);
7388
7389         /* Initialize the Load Balancing Tasklets and Napi object */
7390 #if defined(DHD_LB_TXC)
7391         tasklet_init(&dhd->tx_compl_tasklet,
7392                 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7393         INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7394         DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7395 #endif /* DHD_LB_TXC */
7396
7397 #if defined(DHD_LB_RXC)
7398         tasklet_init(&dhd->rx_compl_tasklet,
7399                 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7400         INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7401         DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7402 #endif /* DHD_LB_RXC */
7403
7404 #if defined(DHD_LB_RXP)
7405          __skb_queue_head_init(&dhd->rx_pend_queue);
7406         skb_queue_head_init(&dhd->rx_napi_queue);
7407
7408         /* Initialize the work that dispatches NAPI job to a given core */
7409         INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7410         DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7411 #endif /* DHD_LB_RXP */
7412
7413 #endif /* DHD_LB */
7414
7415         INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7416
7417         (void)dhd_sysfs_init(dhd);
7418
7419         return &dhd->pub;
7420
7421 fail:
7422         if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7423                 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7424                         __FUNCTION__, dhd_state, &dhd->pub));
7425                 dhd->dhd_state = dhd_state;
7426                 dhd_detach(&dhd->pub);
7427                 dhd_free(&dhd->pub);
7428         }
7429
7430         return NULL;
7431 }
7432
7433 #include <linux/delay.h>
7434
7435 void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7436 {
7437         dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7438
7439         schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7440 }
7441
7442 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7443 {
7444         if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7445                 return DHD_FLAG_HOSTAP_MODE;
7446         if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7447                 return DHD_FLAG_P2P_MODE;
7448         if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7449                 return DHD_FLAG_IBSS_MODE;
7450         if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7451                 return DHD_FLAG_MFG_MODE;
7452
7453         return DHD_FLAG_STA_MODE;
7454 }
7455
extern int rkwifi_set_firmware(char *fw, char *nvram);
/*
 * Resolve dhdinfo->fw_path / nv_path / conf_path from, in increasing
 * priority order: Rockchip platform defaults (or CONFIG_BCMDHD_*_PATH),
 * adapter info (only when the field is still empty), and the
 * firmware_path/nvram_path/config_path module parameters.
 * Returns TRUE when the mandatory paths are resolved, FALSE otherwise.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	int conf_len;
	const char *fw = NULL;		/* current firmware path candidate */
	const char *nv = NULL;		/* current nvram path candidate */
	const char *conf = NULL;	/* current config path candidate */
	char firmware[100] = {0};	/* filled in by rkwifi_set_firmware() */
	char nvram[100] = {0};		/* filled in by rkwifi_set_firmware() */
	wifi_adapter_info_t *adapter = dhdinfo->adapter;


	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	/* NOTE(review): the dhd_download_fw_on_driverload guard is commented
	 * out, so the platform defaults are applied unconditionally.
	 */
//	if (!dhd_download_fw_on_driverload) {
		rkwifi_set_firmware(firmware, nvram);
#ifdef CONFIG_BCMDHD_FW_PATH
		fw = CONFIG_BCMDHD_FW_PATH;
#else
		fw = firmware;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
		nv = CONFIG_BCMDHD_NVRAM_PATH;
#else
		nv = nvram;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//	}

	/* check if we need to initialize the path: adapter info is consulted
	 * only while the corresponding dhdinfo field is still empty.
	 */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;

	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
	if (config_path[0] != '\0')
		conf = config_path;

	/* Copy the winning candidates into dhdinfo, rejecting oversized paths.
	 * The fw_len/nv_len/conf_len bound checks guarantee the strncpy()
	 * result is NUL-terminated here; a single trailing newline (e.g. from
	 * a sysfs/proc write) is stripped.
	 */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->fw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
		if (dhdinfo->fw_path[fw_len-1] == '\n')
		       dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= sizeof(dhdinfo->nv_path)) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
		if (dhdinfo->nv_path[nv_len-1] == '\n')
		       dhdinfo->nv_path[nv_len-1] = '\0';
	}
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
		       dhdinfo->conf_path[conf_len-1] = '\0';
	}

#if 0
	/* clear the path in module parameter */
	if (dhd_download_fw_on_driverload) {
		firmware_path[0] = '\0';
		nvram_path[0] = '\0';
		config_path[0] = '\0';
	}
#endif

#ifndef BCMEMBEDIMAGE
	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
		return FALSE;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}
	/* conf_path is optional: derive it from the nvram path when unset */
	if (dhdinfo->conf_path[0] == '\0') {
		dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
	}
#ifdef CONFIG_PATH_AUTO_SELECT
	dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
#endif
#endif /* BCMEMBEDIMAGE */

	return TRUE;
}
7581
#ifdef CUSTOMER_HW4_DEBUG
/*
 * Sanity-check that the chip id read from the dongle matches the chip id
 * the driver was configured for at build time (exactly one BCMxxxx_CHIP
 * define is expected). Returns TRUE on a match, FALSE otherwise; an
 * unknown build configuration always returns FALSE.
 */
bool dhd_validate_chipid(dhd_pub_t *dhdp)
{
	uint chipid = dhd_bus_chip_id(dhdp);	/* id reported by the bus/dongle */
	uint config_chipid;			/* id selected by the build config */

#ifdef BCM4359_CHIP
	config_chipid = BCM4359_CHIP_ID;
#elif defined(BCM4358_CHIP)
	config_chipid = BCM4358_CHIP_ID;
#elif defined(BCM4354_CHIP)
	config_chipid = BCM4354_CHIP_ID;
#elif defined(BCM4356_CHIP)
	config_chipid = BCM4356_CHIP_ID;
#elif defined(BCM4339_CHIP)
	config_chipid = BCM4339_CHIP_ID;
#elif defined(BCM43349_CHIP)
	config_chipid = BCM43349_CHIP_ID;
#elif defined(BCM4335_CHIP)
	config_chipid = BCM4335_CHIP_ID;
#elif defined(BCM43241_CHIP)
	config_chipid = BCM4324_CHIP_ID;
#elif defined(BCM4330_CHIP)
	config_chipid = BCM4330_CHIP_ID;
#elif defined(BCM43430_CHIP)
	config_chipid = BCM43430_CHIP_ID;
#elif defined(BCM4334W_CHIP)
	config_chipid = BCM43342_CHIP_ID;
#elif defined(BCM43455_CHIP)
	config_chipid = BCM4345_CHIP_ID;
#else
	DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
		" please add CONFIG_BCMXXXX into the Kernel and"
		" BCMXXXX_CHIP definition into the DHD driver\n",
		__FUNCTION__));
	config_chipid = 0;

	return FALSE;
#endif /* end of BCMxxxx_CHIP selection chain (was mislabeled BCM4354_CHIP) */

#if defined(BCM4359_CHIP)
	/* A BCM4355 dongle is explicitly accepted by a BCM4359 build. */
	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4359_CHIP */

	return config_chipid == chipid;
}
#endif /* CUSTOMER_HW4_DEBUG */
7631
/*
 * Bring the dongle bus up: download firmware/nvram/config when the bus is
 * DOWN, start the watchdog timer, initialize the bus and protocol layers,
 * optionally register the OOB host-wake interrupt, and finally sync with
 * the dongle. Returns 0 on success or a negative error code; every error
 * path releases the perimeter lock (and sdlock where held) before return.
 */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
	int ret = -1;
	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
	unsigned long flags;

	ASSERT(dhd);

	DHD_TRACE(("Enter %s:\n", __FUNCTION__));

	DHD_PERIM_LOCK(dhdp);

	/* try to download image and nvram to the dongle */
	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
		/* Indicate FW Download has not yet done */
		dhd->pub.is_fw_download_done = FALSE;
		DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			dhd->fw_path, dhd->nv_path, dhd->conf_path);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to download firmware %s\n",
				__FUNCTION__, dhd->fw_path));
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
		/* Indicate FW Download has succeeded */
		dhd->pub.is_fw_download_done = TRUE;
	}
	/* Download (when it ran) must have moved the bus to LOAD state */
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
		DHD_PERIM_UNLOCK(dhdp);
		return -ENETDOWN;
	}

	dhd_os_sdlock(dhdp);

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
	DHD_ENABLE_RUNTIME_PM(&dhd->pub);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {

		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}
#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
#if defined(BCMPCIE_OOB_HOST_WAKE)
	/* PCIe host-wake registration must not run under sdlock */
	dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* Host registration for OOB interrupt */
	if (dhd_bus_oob_intr_register(dhdp)) {
		/* deactivate timer and wait for the handler to finish */
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);

		dhd_os_sdunlock(dhdp);
#endif /* !BCMPCIE_OOB_HOST_WAKE */
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
		return -ENODEV;
	}

#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_os_sdlock(dhdp);
	dhd_bus_oob_intr_set(dhdp, TRUE);
#else
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#elif defined(FORCE_WOWLAN)
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif 
#ifdef PCIE_FULL_DONGLE
	{
		/* max_h2d_rings includes H2D common rings */
		uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);

		DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
			max_h2d_rings));
		if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
			dhd_os_sdunlock(dhdp);
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
#endif /* PCIE_FULL_DONGLE */

	/* Do protocol initialization necessary for IOCTL/IOVAR */
#ifdef PCIE_FULL_DONGLE
	dhd_os_sdunlock(dhdp);
#endif /* PCIE_FULL_DONGLE */
	ret = dhd_prot_init(&dhd->pub);
	/* NOTE(review): parenthesization looks off - likely meant
	 * unlikely(ret != BCME_OK); equivalent here only because BCME_OK == 0.
	 */
	if (unlikely(ret) != BCME_OK) {
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#ifdef PCIE_FULL_DONGLE
	dhd_os_sdlock(dhdp);
#endif /* PCIE_FULL_DONGLE */

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

	dhd_os_sdunlock(dhdp);

	/* Bus is ready, query any dongle information */
	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
		/* deactivate the watchdog before bailing out */
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}

#ifdef ARP_OFFLOAD_SUPPORT
	/* Replay an IP address that arrived before the bus was up */
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	DHD_PERIM_UNLOCK(dhdp);
	return 0;
}
7785
7786 #ifdef WLTDLS
7787 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
7788 {
7789         char iovbuf[WLC_IOCTL_SMLEN];
7790         uint32 tdls = tdls_on;
7791         int ret = 0;
7792         uint32 tdls_auto_op = 0;
7793         uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
7794         int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
7795         int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
7796         BCM_REFERENCE(mac);
7797         if (!FW_SUPPORTED(dhd, tdls))
7798                 return BCME_ERROR;
7799
7800         if (dhd->tdls_enable == tdls_on)
7801                 goto auto_mode;
7802         bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
7803         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7804                 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
7805                 goto exit;
7806         }
7807         dhd->tdls_enable = tdls_on;
7808 auto_mode:
7809
7810         tdls_auto_op = auto_on;
7811         bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
7812                 iovbuf, sizeof(iovbuf));
7813         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7814                 sizeof(iovbuf), TRUE, 0)) < 0) {
7815                 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
7816                 goto exit;
7817         }
7818
7819         if (tdls_auto_op) {
7820                 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
7821                         sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
7822                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7823                         sizeof(iovbuf), TRUE, 0)) < 0) {
7824                         DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
7825                         goto exit;
7826                 }
7827                 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
7828                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7829                         sizeof(iovbuf), TRUE, 0)) < 0) {
7830                         DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
7831                         goto exit;
7832                 }
7833                 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
7834                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7835                         sizeof(iovbuf), TRUE, 0)) < 0) {
7836                         DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
7837                         goto exit;
7838                 }
7839         }
7840
7841 exit:
7842         return ret;
7843 }
7844
7845 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7846 {
7847         dhd_info_t *dhd = DHD_DEV_INFO(dev);
7848         int ret = 0;
7849         if (dhd)
7850                 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7851         else
7852                 ret = BCME_ERROR;
7853         return ret;
7854 }
7855 int
7856 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7857 {
7858         char iovbuf[WLC_IOCTL_SMLEN];
7859         int ret = 0;
7860         bool auto_on = false;
7861         uint32 mode =  wfd_mode;
7862
7863 #ifdef ENABLE_TDLS_AUTO_MODE
7864         if (wfd_mode) {
7865                 auto_on = false;
7866         } else {
7867                 auto_on = true;
7868         }
7869 #else
7870         auto_on = false;
7871 #endif /* ENABLE_TDLS_AUTO_MODE */
7872         ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7873         if (ret < 0) {
7874                 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7875                 return ret;
7876         }
7877
7878
7879         bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7880                         iovbuf, sizeof(iovbuf));
7881         if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7882                         sizeof(iovbuf), TRUE, 0)) < 0) &&
7883                         (ret != BCME_UNSUPPORTED)) {
7884                 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
7885                 return ret;
7886         }
7887
7888         ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7889         if (ret < 0) {
7890                 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7891                 return ret;
7892         }
7893
7894         dhd->tdls_mode = mode;
7895         return ret;
7896 }
#ifdef PCIE_FULL_DONGLE
/*
 * Maintain the driver's TDLS peer table for the interface behind 'dev'.
 * connect == TRUE:  add peer address 'da' (ignored if already present).
 * connect == FALSE: delete the peer's flow rings and unlink/free its entry.
 *
 * Fix: removed the dead locals 'dhdif' and 'sa' - 'sa' was filled from the
 * interface MAC address via memcpy() but never read afterwards, and
 * 'dhdif' existed only to feed that copy.
 */
void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	int ifidx = dhd_net2idx(dhd, dev);

	if (ifidx == DHD_BAD_IF)
		return;

	if (connect) {
		/* Refuse duplicate entries. */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
					__FUNCTION__, __LINE__));
				return;
			}
			cur = cur->next;
		}

		new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		/* Push onto the head of the singly-linked peer list. */
		new->next = dhdp->peer_tbl.node;
		dhdp->peer_tbl.node = new;
		dhdp->peer_tbl.tdls_peer_count++;

	} else {
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				/* Tear down flow rings before unlinking the node. */
				dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
				if (prev)
					prev->next = cur->next;
				else
					dhdp->peer_tbl.node = cur->next;
				MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
				dhdp->peer_tbl.tdls_peer_count--;
				return;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
}
#endif /* PCIE_FULL_DONGLE */
7953 #endif 
7954
7955 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
7956 {
7957         if (!dhd)
7958                 return FALSE;
7959
7960         if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
7961                 return TRUE;
7962         else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
7963                 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
7964                 return TRUE;
7965         else
7966                 return FALSE;
7967 }
7968 #if !defined(AP) && defined(WLP2P)
7969 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
7970  * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
7971  * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
7972  * would still be named as fw_bcmdhd_apsta.
7973  */
7974 uint32
7975 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
7976 {
7977         int32 ret = 0;
7978         char buf[WLC_IOCTL_SMLEN];
7979         bool mchan_supported = FALSE;
7980         /* if dhd->op_mode is already set for HOSTAP and Manufacturing
7981          * test mode, that means we only will use the mode as it is
7982          */
7983         if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
7984                 return 0;
7985         if (FW_SUPPORTED(dhd, vsdb)) {
7986                 mchan_supported = TRUE;
7987         }
7988         if (!FW_SUPPORTED(dhd, p2p)) {
7989                 DHD_TRACE(("Chip does not support p2p\n"));
7990                 return 0;
7991         } else {
7992                 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
7993                 memset(buf, 0, sizeof(buf));
7994                 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
7995                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
7996                         FALSE, 0)) < 0) {
7997                         DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
7998                         return 0;
7999                 } else {
8000                         if (buf[0] == 1) {
8001                                 /* By default, chip supports single chan concurrency,
8002                                 * now lets check for mchan
8003                                 */
8004                                 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
8005                                 if (mchan_supported)
8006                                         ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
8007                                 if (FW_SUPPORTED(dhd, rsdb)) {
8008                                         ret |= DHD_FLAG_RSDB_MODE;
8009                                 }
8010                                 if (FW_SUPPORTED(dhd, mp2p)) {
8011                                         ret |= DHD_FLAG_MP2P_MODE;
8012                                 }
8013 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
8014                                 return ret;
8015 #else
8016                                 return 0;
8017 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
8018                         }
8019                 }
8020         }
8021         return 0;
8022 }
8023 #endif 
8024
8025 #ifdef SUPPORT_AP_POWERSAVE
8026 #define RXCHAIN_PWRSAVE_PPS                     10
8027 #define RXCHAIN_PWRSAVE_QUIET_TIME              10
8028 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK        0
8029 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8030 {
8031         char iovbuf[128];
8032         int32 pps = RXCHAIN_PWRSAVE_PPS;
8033         int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8034         int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8035
8036         if (enable) {
8037                 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8038                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8039                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8040                         DHD_ERROR(("Failed to enable AP power save\n"));
8041                 }
8042                 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8043                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8044                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8045                         DHD_ERROR(("Failed to set pps\n"));
8046                 }
8047                 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8048                 4, iovbuf, sizeof(iovbuf));
8049                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8050                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8051                         DHD_ERROR(("Failed to set quiet time\n"));
8052                 }
8053                 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8054                 4, iovbuf, sizeof(iovbuf));
8055                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8056                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8057                         DHD_ERROR(("Failed to set stas assoc check\n"));
8058                 }
8059         } else {
8060                 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8061                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8062                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8063                         DHD_ERROR(("Failed to disable AP power save\n"));
8064                 }
8065         }
8066
8067         return 0;
8068 }
8069 #endif /* SUPPORT_AP_POWERSAVE */
8070
8071
8072 int
8073 dhd_preinit_ioctls(dhd_pub_t *dhd)
8074 {
8075         int ret = 0;
8076         char eventmask[WL_EVENTING_MASK_LEN];
8077         char iovbuf[WL_EVENTING_MASK_LEN + 12]; /*  Room for "event_msgs" + '\0' + bitvec  */
8078         uint32 buf_key_b4_m4 = 1;
8079 #ifndef WL_CFG80211
8080         u32 up = 0;
8081 #endif
8082         uint8 msglen;
8083         eventmsgs_ext_t *eventmask_msg = NULL;
8084         char* iov_buf = NULL;
8085         int ret2 = 0;
8086 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8087         uint32 ampdu_ba_wsize = 0;
8088 #endif 
8089 #if defined(CUSTOM_AMPDU_MPDU)
8090         int32 ampdu_mpdu = 0;
8091 #endif
8092 #if defined(CUSTOM_AMPDU_RELEASE)
8093         int32 ampdu_release = 0;
8094 #endif
8095 #if defined(CUSTOM_AMSDU_AGGSF)
8096         int32 amsdu_aggsf = 0;
8097 #endif
8098 #ifdef SUPPORT_SENSORHUB
8099         int32 shub_enable = 0;
8100 #endif /* SUPPORT_SENSORHUB */
8101 #if defined(BCMSDIO)
8102 #ifdef PROP_TXSTATUS
8103         int wlfc_enable = TRUE;
8104 #ifndef DISABLE_11N
8105         uint32 hostreorder = 1;
8106         uint wl_down = 1;
8107 #endif /* DISABLE_11N */
8108 #endif /* PROP_TXSTATUS */
8109 #endif 
8110 #ifdef PCIE_FULL_DONGLE
8111         uint32 wl_ap_isolate;
8112 #endif /* PCIE_FULL_DONGLE */
8113
8114 #if defined(BCMSDIO)
8115         /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8116         uint32 frameburst = 0;
8117 #else
8118         uint32 frameburst = 1;
8119 #endif /* BCMSDIO */
8120
8121 #ifdef DHD_ENABLE_LPC
8122         uint32 lpc = 1;
8123 #endif /* DHD_ENABLE_LPC */
8124         uint power_mode = PM_FAST;
8125 #if defined(BCMSDIO)
8126         uint32 dongle_align = DHD_SDALIGN;
8127         uint32 glom = CUSTOM_GLOM_SETTING;
8128 #endif /* defined(BCMSDIO) */
8129 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8130         uint32 credall = 1;
8131 #endif
8132         uint bcn_timeout = dhd->conf->bcn_timeout;
8133 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8134         uint32 bcn_li_bcn = 1;
8135 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8136         uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8137 #if defined(ARP_OFFLOAD_SUPPORT)
8138         int arpoe = 1;
8139 #endif
8140         int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8141         int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8142         int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8143         char buf[WLC_IOCTL_SMLEN];
8144         char *ptr;
8145         uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8146 #ifdef ROAM_ENABLE
8147         uint roamvar = 0;
8148         int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8149         int roam_scan_period[2] = {10, WLC_BAND_ALL};
8150         int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8151 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8152         int roam_fullscan_period = 60;
8153 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8154         int roam_fullscan_period = 120;
8155 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8156 #else
8157 #ifdef DISABLE_BUILTIN_ROAM
8158         uint roamvar = 1;
8159 #endif /* DISABLE_BUILTIN_ROAM */
8160 #endif /* ROAM_ENABLE */
8161
8162 #if defined(SOFTAP)
8163         uint dtim = 1;
8164 #endif
8165 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8166         uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8167         struct ether_addr p2p_ea;
8168 #endif
8169 #ifdef SOFTAP_UAPSD_OFF
8170         uint32 wme_apsd = 0;
8171 #endif /* SOFTAP_UAPSD_OFF */
8172 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8173         uint32 apsta = 1; /* Enable APSTA mode */
8174 #elif defined(SOFTAP_AND_GC)
8175         uint32 apsta = 0;
8176         int ap_mode = 1;
8177 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8178 #ifdef GET_CUSTOM_MAC_ENABLE
8179         struct ether_addr ea_addr;
8180 #endif /* GET_CUSTOM_MAC_ENABLE */
8181
8182 #ifdef DISABLE_11N
8183         uint32 nmode = 0;
8184 #endif /* DISABLE_11N */
8185
8186 #ifdef USE_WL_TXBF
8187         uint32 txbf = 1;
8188 #endif /* USE_WL_TXBF */
8189 #if defined(PROP_TXSTATUS)
8190 #ifdef USE_WFA_CERT_CONF
8191         uint32 proptx = 0;
8192 #endif /* USE_WFA_CERT_CONF */
8193 #endif /* PROP_TXSTATUS */
8194 #ifdef CUSTOM_PSPRETEND_THR
8195         uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8196 #endif
8197         uint32 rsdb_mode = 0;
8198 #ifdef ENABLE_TEMP_THROTTLING
8199         wl_temp_control_t temp_control;
8200 #endif /* ENABLE_TEMP_THROTTLING */
8201 #ifdef DISABLE_PRUNED_SCAN
8202         uint32 scan_features = 0;
8203 #endif /* DISABLE_PRUNED_SCAN */
8204 #ifdef CUSTOM_EVENT_PM_WAKE
8205         uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8206 #endif /* CUSTOM_EVENT_PM_WAKE */
8207 #ifdef PKT_FILTER_SUPPORT
8208         dhd_pkt_filter_enable = TRUE;
8209 #endif /* PKT_FILTER_SUPPORT */
8210 #ifdef WLTDLS
8211         dhd->tdls_enable = FALSE;
8212         dhd_tdls_set_mode(dhd, false);
8213 #endif /* WLTDLS */
8214         dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8215         DHD_TRACE(("Enter %s\n", __FUNCTION__));
8216
8217         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
8218 #ifdef DHDTCPACK_SUPPRESS
8219         printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8220         dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8221 #endif
8222
8223         dhd->op_mode = 0;
8224 #ifdef CUSTOMER_HW4_DEBUG
8225         if (!dhd_validate_chipid(dhd)) {
8226                 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
8227                         __FUNCTION__, dhd_bus_chip_id(dhd)));
8228 #ifndef SUPPORT_MULTIPLE_CHIPS
8229                 ret = BCME_BADARG;
8230                 goto done;
8231 #endif /* !SUPPORT_MULTIPLE_CHIPS */
8232         }
8233 #endif /* CUSTOMER_HW4_DEBUG */
8234         if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8235                 (op_mode == DHD_FLAG_MFG_MODE)) {
8236 #ifdef DHD_PCIE_RUNTIMEPM
8237                 /* Disable RuntimePM in mfg mode */
8238                 DHD_DISABLE_RUNTIME_PM(dhd);
8239                 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
8240 #endif /* DHD_PCIE_RUNTIME_PM */
8241                 /* Check and adjust IOCTL response timeout for Manufactring firmware */
8242                 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
8243                 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
8244                         __FUNCTION__));
8245         } else {
8246                 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8247                 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8248         }
8249 #ifdef GET_CUSTOM_MAC_ENABLE
8250         ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
8251         if (!ret) {
8252                 memset(buf, 0, sizeof(buf));
8253                 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8254                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8255                 if (ret < 0) {
8256                         DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8257                                 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
8258                         ret = BCME_NOTUP;
8259                         goto done;
8260                 }
8261                 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
8262         } else {
8263 #endif /* GET_CUSTOM_MAC_ENABLE */
8264                 /* Get the default device MAC address directly from firmware */
8265                 memset(buf, 0, sizeof(buf));
8266                 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8267                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8268                         FALSE, 0)) < 0) {
8269                         DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
8270                         ret = BCME_NOTUP;
8271                         goto done;
8272                 }
8273                 /* Update public MAC address after reading from Firmware */
8274                 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
8275
8276 #ifdef GET_CUSTOM_MAC_ENABLE
8277         }
8278 #endif /* GET_CUSTOM_MAC_ENABLE */
8279
8280         /* get a capabilities from firmware */
8281         {
8282                 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8283                 memset(dhd->fw_capabilities, 0, cap_buf_size);
8284                 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8285                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8286                         (cap_buf_size - 1), FALSE, 0)) < 0)
8287                 {
8288                         DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8289                                 __FUNCTION__, ret));
8290                         return 0;
8291                 }
8292
8293                 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8294                 dhd->fw_capabilities[0] = ' ';
8295                 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8296                 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8297         }
8298
8299         if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8300                 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8301 #ifdef SET_RANDOM_MAC_SOFTAP
8302                 uint rand_mac;
8303 #endif /* SET_RANDOM_MAC_SOFTAP */
8304                 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8305 #if defined(ARP_OFFLOAD_SUPPORT)
8306                         arpoe = 0;
8307 #endif
8308 #ifdef PKT_FILTER_SUPPORT
8309                         dhd_pkt_filter_enable = FALSE;
8310 #endif
8311 #ifdef SET_RANDOM_MAC_SOFTAP
8312                 SRANDOM32((uint)jiffies);
8313                 rand_mac = RANDOM32();
8314                 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;   /* local admin bit */
8315                 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8316                 iovbuf[2] = (unsigned char)vendor_oui;
8317                 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8318                 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8319                 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8320
8321                 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8322                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8323                 if (ret < 0) {
8324                         DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
8325                 } else
8326                         memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8327 #endif /* SET_RANDOM_MAC_SOFTAP */
8328 #if !defined(AP) && defined(WL_CFG80211)
8329                 /* Turn off MPC in AP mode */
8330                 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8331                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8332                         sizeof(iovbuf), TRUE, 0)) < 0) {
8333                         DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
8334                 }
8335 #endif
8336 #ifdef USE_DYNAMIC_F2_BLKSIZE
8337                 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8338 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8339 #ifdef SUPPORT_AP_POWERSAVE
8340                 dhd_set_ap_powersave(dhd, 0, TRUE);
8341 #endif /* SUPPORT_AP_POWERSAVE */
8342 #ifdef SOFTAP_UAPSD_OFF
8343                 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8344                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8345                         sizeof(iovbuf), TRUE, 0)) < 0) {
8346                         DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8347                                 __FUNCTION__, ret));
8348                 }
8349 #endif /* SOFTAP_UAPSD_OFF */
8350         } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8351                 (op_mode == DHD_FLAG_MFG_MODE)) {
8352 #if defined(ARP_OFFLOAD_SUPPORT)
8353                 arpoe = 0;
8354 #endif /* ARP_OFFLOAD_SUPPORT */
8355 #ifdef PKT_FILTER_SUPPORT
8356                 dhd_pkt_filter_enable = FALSE;
8357 #endif /* PKT_FILTER_SUPPORT */
8358                 dhd->op_mode = DHD_FLAG_MFG_MODE;
8359 #ifdef USE_DYNAMIC_F2_BLKSIZE
8360                 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8361 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8362                 if (FW_SUPPORTED(dhd, rsdb)) {
8363                         rsdb_mode = 0;
8364                         bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8365                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8366                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8367                                 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
8368                                         __FUNCTION__, ret));
8369                         }
8370                 }
8371         } else {
8372                 uint32 concurrent_mode = 0;
8373                 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8374                         (op_mode == DHD_FLAG_P2P_MODE)) {
8375 #if defined(ARP_OFFLOAD_SUPPORT)
8376                         arpoe = 0;
8377 #endif
8378 #ifdef PKT_FILTER_SUPPORT
8379                         dhd_pkt_filter_enable = FALSE;
8380 #endif
8381                         dhd->op_mode = DHD_FLAG_P2P_MODE;
8382                 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8383                         (op_mode == DHD_FLAG_IBSS_MODE)) {
8384                         dhd->op_mode = DHD_FLAG_IBSS_MODE;
8385                 } else
8386                         dhd->op_mode = DHD_FLAG_STA_MODE;
8387 #if !defined(AP) && defined(WLP2P)
8388                 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8389                         (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8390 #if defined(ARP_OFFLOAD_SUPPORT)
8391                         arpoe = 1;
8392 #endif
8393                         dhd->op_mode |= concurrent_mode;
8394                 }
8395
8396                 /* Check if we are enabling p2p */
8397                 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8398                         bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8399                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8400                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8401                                 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8402                         }
8403
8404 #if defined(SOFTAP_AND_GC)
8405                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8406                                 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8407                                         DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8408                         }
8409 #endif
8410                         memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8411                         ETHER_SET_LOCALADDR(&p2p_ea);
8412                         bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8413                                 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8414                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8415                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8416                                 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8417                         } else {
8418                                 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8419                         }
8420                 }
8421 #else
8422                 (void)concurrent_mode;
8423 #endif 
8424         }
8425
8426 #ifdef RSDB_MODE_FROM_FILE
8427         (void)dhd_rsdb_mode_from_file(dhd);
8428 #endif /* RSDB_MODE_FROM_FILE */
8429
8430 #ifdef DISABLE_PRUNED_SCAN
8431         if (FW_SUPPORTED(dhd, rsdb)) {
8432                 memset(iovbuf, 0, sizeof(iovbuf));
8433                 bcm_mkiovar("scan_features", (char *)&scan_features,
8434                         4, iovbuf, sizeof(iovbuf));
8435                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8436                         iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8437                         DHD_ERROR(("%s get scan_features is failed ret=%d\n",
8438                                 __FUNCTION__, ret));
8439                 } else {
8440                         memcpy(&scan_features, iovbuf, 4);
8441                         scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8442                         memset(iovbuf, 0, sizeof(iovbuf));
8443                         bcm_mkiovar("scan_features", (char *)&scan_features,
8444                                 4, iovbuf, sizeof(iovbuf));
8445                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8446                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8447                                 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
8448                                         __FUNCTION__, ret));
8449                         }
8450                 }
8451         }
8452 #endif /* DISABLE_PRUNED_SCAN */
8453
8454         DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8455                 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8456         #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8457         if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8458                 dhd->info->rxthread_enabled = FALSE;
8459         else
8460                 dhd->info->rxthread_enabled = TRUE;
8461         #endif
8462         /* Set Country code  */
8463         if (dhd->dhd_cspec.ccode[0] != 0) {
8464                 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8465                 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8466                         sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8467                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8468                         printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8469         } else {
8470                 dhd_conf_set_country(dhd);
8471                 dhd_conf_fix_country(dhd);
8472         }
8473         dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8474
8475
8476         /* Set Listen Interval */
8477         bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8478         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8479                 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8480
8481 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8482 #ifdef USE_WFA_CERT_CONF
8483         if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8484                 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8485         }
8486 #endif /* USE_WFA_CERT_CONF */
8487         /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
8488         bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8489         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8490 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
8491 #if defined(ROAM_ENABLE)
8492         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8493                 sizeof(roam_trigger), TRUE, 0)) < 0)
8494                 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8495         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8496                 sizeof(roam_scan_period), TRUE, 0)) < 0)
8497                 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
8498         if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
8499                 sizeof(roam_delta), TRUE, 0)) < 0)
8500                 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8501         bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8502         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8503                 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8504 #endif /* ROAM_ENABLE */
8505         dhd_conf_set_roam(dhd);
8506
8507 #ifdef CUSTOM_EVENT_PM_WAKE
8508         bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8509         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8510                 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8511         }
8512 #endif /* CUSTOM_EVENT_PM_WAKE */
8513 #ifdef WLTDLS
8514 #ifdef ENABLE_TDLS_AUTO_MODE
8515         /* by default TDLS on and auto mode on */
8516         _dhd_tdls_enable(dhd, true, true, NULL);
8517 #else
8518         /* by default TDLS on and auto mode off */
8519         _dhd_tdls_enable(dhd, true, false, NULL);
8520 #endif /* ENABLE_TDLS_AUTO_MODE */
8521 #endif /* WLTDLS */
8522
8523 #ifdef DHD_ENABLE_LPC
8524         /* Set lpc 1 */
8525         bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8526         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8527                 sizeof(iovbuf), TRUE, 0)) < 0) {
8528                 DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
8529
8530                 if (ret == BCME_NOTDOWN) {
8531                         uint wl_down = 1;
8532                         ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8533                                 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8534                         DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8535
8536                         bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8537                         ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8538                         DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8539                 }
8540         }
8541 #endif /* DHD_ENABLE_LPC */
8542         dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
8543
8544         /* Set PowerSave mode */
8545         if (dhd->conf->pm >= 0)
8546                 power_mode = dhd->conf->pm;
8547         dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
8548
8549 #if defined(BCMSDIO)
8550         /* Match Host and Dongle rx alignment */
8551         bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8552         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8553
8554 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8555         /* enable credall to reduce the chance of no bus credit happened. */
8556         bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8557         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8558 #endif
8559
8560 #ifdef USE_WFA_CERT_CONF
8561         if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8562                 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8563         }
8564 #endif /* USE_WFA_CERT_CONF */
8565         if (glom != DEFAULT_GLOM_VALUE) {
8566                 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8567                 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8568                 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8569         }
8570 #endif /* defined(BCMSDIO) */
8571
8572         /* Setup timeout if Beacons are lost and roam is off to report link down */
8573         bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8574         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8575         /* Setup assoc_retry_max count to reconnect target AP in dongle */
8576         bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8577         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8578 #if defined(AP) && !defined(WLP2P)
8579         /* Turn off MPC in AP mode */
8580         bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8581         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8582         bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8583         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8584 #endif /* defined(AP) && !defined(WLP2P) */
8585         /*  0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
8586         dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
8587         dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8588         dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8589         dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8590         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
8591         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
8592         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
8593         dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8594
8595 #ifdef MIMO_ANT_SETTING
8596         dhd_sel_ant_from_file(dhd);
8597 #endif /* MIMO_ANT_SETTING */
8598
8599 #if defined(SOFTAP)
8600         if (ap_fw_loaded == TRUE) {
8601                 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8602         }
8603 #endif 
8604
8605 #if defined(KEEP_ALIVE)
8606         {
8607         /* Set Keep Alive : be sure to use FW with -keepalive */
8608         int res;
8609
8610 #if defined(SOFTAP)
8611         if (ap_fw_loaded == FALSE)
8612 #endif 
8613                 if (!(dhd->op_mode &
8614                         (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8615                         if ((res = dhd_keep_alive_onoff(dhd)) < 0)
8616                                 DHD_ERROR(("%s set keeplive failed %d\n",
8617                                 __FUNCTION__, res));
8618                 }
8619         }
8620 #endif /* defined(KEEP_ALIVE) */
8621
8622 #ifdef USE_WL_TXBF
8623         bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8624         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8625                 sizeof(iovbuf), TRUE, 0)) < 0) {
8626                 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8627         }
8628 #endif /* USE_WL_TXBF */
8629         dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
8630
8631 #ifdef USE_WFA_CERT_CONF
8632 #ifdef USE_WL_FRAMEBURST
8633          if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8634                 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8635          }
8636 #endif /* USE_WL_FRAMEBURST */
8637 #ifdef DISABLE_FRAMEBURST_VSDB
8638         g_frameburst = frameburst;
8639 #endif /* DISABLE_FRAMEBURST_VSDB */
8640 #endif /* USE_WFA_CERT_CONF */
8641 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
8642         /* Disable Framebursting for SofAP */
8643         if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8644                 frameburst = 0;
8645         }
8646 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8647         /* Set frameburst to value */
8648         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8649                 sizeof(frameburst), TRUE, 0)) < 0) {
8650                 DHD_INFO(("%s frameburst not supported  %d\n", __FUNCTION__, ret));
8651         }
8652         dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
8653 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8654         /* Set ampdu ba wsize to 64 or 16 */
8655 #ifdef CUSTOM_AMPDU_BA_WSIZE
8656         ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8657 #endif
8658         if (ampdu_ba_wsize != 0) {
8659                 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8660                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8661                         sizeof(iovbuf), TRUE, 0)) < 0) {
8662                         DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
8663                                 __FUNCTION__, ampdu_ba_wsize, ret));
8664                 }
8665         }
8666 #endif 
8667         dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
8668
8669         iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8670         if (iov_buf == NULL) {
8671                 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8672                 ret = BCME_NOMEM;
8673                 goto done;
8674         }
8675 #ifdef ENABLE_TEMP_THROTTLING
8676         if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8677                 memset(&temp_control, 0, sizeof(temp_control));
8678                 temp_control.enable = 1;
8679                 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8680                 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8681                                 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8682                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8683                 if (ret < 0) {
8684                         DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
8685                                 __FUNCTION__, ret));
8686                 }
8687         }
8688 #endif /* ENABLE_TEMP_THROTTLING */
8689 #if defined(CUSTOM_AMPDU_MPDU)
8690         ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8691         if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8692                 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
8693                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8694                         sizeof(iovbuf), TRUE, 0)) < 0) {
8695                         DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
8696                                 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8697                 }
8698         }
8699 #endif /* CUSTOM_AMPDU_MPDU */
8700
8701 #if defined(CUSTOM_AMPDU_RELEASE)
8702         ampdu_release = CUSTOM_AMPDU_RELEASE;
8703         if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8704                 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
8705                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8706                         sizeof(iovbuf), TRUE, 0)) < 0) {
8707                         DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
8708                                 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8709                 }
8710         }
8711 #endif /* CUSTOM_AMPDU_RELEASE */
8712
8713 #if defined(CUSTOM_AMSDU_AGGSF)
8714         amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8715         if (amsdu_aggsf != 0) {
8716                 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8717                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8718                 if (ret < 0) {
8719                         DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8720                                 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8721                 }
8722         }
8723 #endif /* CUSTOM_AMSDU_AGGSF */
8724
8725 #ifdef CUSTOM_PSPRETEND_THR
8726         /* Turn off MPC in AP mode */
8727         bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8728                 iovbuf, sizeof(iovbuf));
8729         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8730                 sizeof(iovbuf), TRUE, 0)) < 0) {
8731                 DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
8732                         __FUNCTION__, ret));
8733         }
8734 #endif
8735
8736         bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8737         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8738                 sizeof(iovbuf), TRUE, 0)) < 0) {
8739                 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8740         }
8741
8742         /* Read event_msgs mask */
8743         bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8744         if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8745                 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8746                 goto done;
8747         }
8748         bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8749
8750         /* Setup event_msgs */
8751         setbit(eventmask, WLC_E_SET_SSID);
8752         setbit(eventmask, WLC_E_PRUNE);
8753         setbit(eventmask, WLC_E_AUTH);
8754         setbit(eventmask, WLC_E_AUTH_IND);
8755         setbit(eventmask, WLC_E_ASSOC);
8756         setbit(eventmask, WLC_E_REASSOC);
8757         setbit(eventmask, WLC_E_REASSOC_IND);
8758         if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8759                 setbit(eventmask, WLC_E_DEAUTH);
8760         setbit(eventmask, WLC_E_DEAUTH_IND);
8761         setbit(eventmask, WLC_E_DISASSOC_IND);
8762         setbit(eventmask, WLC_E_DISASSOC);
8763         setbit(eventmask, WLC_E_JOIN);
8764         setbit(eventmask, WLC_E_START);
8765         setbit(eventmask, WLC_E_ASSOC_IND);
8766         setbit(eventmask, WLC_E_PSK_SUP);
8767         setbit(eventmask, WLC_E_LINK);
8768         setbit(eventmask, WLC_E_MIC_ERROR);
8769         setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8770         setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8771 #ifndef WL_CFG80211
8772         setbit(eventmask, WLC_E_PMKID_CACHE);
8773         setbit(eventmask, WLC_E_TXFAIL);
8774 #endif
8775         setbit(eventmask, WLC_E_JOIN_START);
8776 //      setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8777 #ifdef DHD_DEBUG
8778         setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8779 #endif
8780 #ifdef WLMEDIA_HTSF
8781         setbit(eventmask, WLC_E_HTSFSYNC);
8782 #endif /* WLMEDIA_HTSF */
8783 #ifdef PNO_SUPPORT
8784         setbit(eventmask, WLC_E_PFN_NET_FOUND);
8785         setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8786         setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8787         setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8788 #endif /* PNO_SUPPORT */
8789         /* enable dongle roaming event */
8790         setbit(eventmask, WLC_E_ROAM);
8791         setbit(eventmask, WLC_E_BSSID);
8792 #ifdef WLTDLS
8793         setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8794 #endif /* WLTDLS */
8795 #ifdef WL_CFG80211
8796         setbit(eventmask, WLC_E_ESCAN_RESULT);
8797         setbit(eventmask, WLC_E_AP_STARTED);
8798         if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8799                 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8800                 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8801         }
8802 #endif /* WL_CFG80211 */
8803
8804 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8805         if (dhd_logtrace_from_file(dhd)) {
8806                 setbit(eventmask, WLC_E_TRACE);
8807         } else {
8808                 clrbit(eventmask, WLC_E_TRACE);
8809         }
8810 #elif defined(SHOW_LOGTRACE)
8811         setbit(eventmask, WLC_E_TRACE);
8812 #else
8813         clrbit(eventmask, WLC_E_TRACE);
8814 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8815
8816         setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8817 #ifdef DHD_LOSSLESS_ROAMING
8818         setbit(eventmask, WLC_E_ROAM_PREP);
8819 #endif
8820 #ifdef CUSTOM_EVENT_PM_WAKE
8821         setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8822 #endif /* CUSTOM_EVENT_PM_WAKE */
8823 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8824         dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8825 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8826
8827         /* Write updated Event mask */
8828         bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8829         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8830                 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8831                 goto done;
8832         }
8833
8834         /* make up event mask ext message iovar for event larger than 128 */
8835         msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8836         eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8837         if (eventmask_msg == NULL) {
8838                 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8839                 ret = BCME_NOMEM;
8840                 goto done;
8841         }
8842         bzero(eventmask_msg, msglen);
8843         eventmask_msg->ver = EVENTMSGS_VER;
8844         eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8845
8846         /* Read event_msgs_ext mask */
8847         bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8848         ret2  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8849         if (ret2 == 0) { /* event_msgs_ext must be supported */
8850                 bcopy(iov_buf, eventmask_msg, msglen);
8851 #ifdef GSCAN_SUPPORT
8852                 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8853                 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8854                 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8855 #endif /* GSCAN_SUPPORT */
8856 #ifdef BT_WIFI_HANDOVER
8857                 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8858 #endif /* BT_WIFI_HANDOVER */
8859
8860                 /* Write updated Event mask */
8861                 eventmask_msg->ver = EVENTMSGS_VER;
8862                 eventmask_msg->command = EVENTMSGS_SET_MASK;
8863                 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8864                 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8865                         msglen, iov_buf, WLC_IOCTL_SMLEN);
8866                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8867                         iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8868                         DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8869                         goto done;
8870                 }
8871         } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8872                 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
8873                 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
8874                         __FUNCTION__, ret2));
8875         } else {
8876                 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
8877                 ret = ret2;
8878                 goto done;
8879         }
8880
8881         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
8882                 sizeof(scan_assoc_time), TRUE, 0);
8883         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
8884                 sizeof(scan_unassoc_time), TRUE, 0);
8885         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
8886                 sizeof(scan_passive_time), TRUE, 0);
8887
8888 #ifdef ARP_OFFLOAD_SUPPORT
8889         /* Set and enable ARP offload feature for STA only  */
8890 #if defined(SOFTAP)
8891         if (arpoe && !ap_fw_loaded)
8892 #else
8893         if (arpoe)
8894 #endif
8895         {
8896                 dhd_arp_offload_enable(dhd, TRUE);
8897                 dhd_arp_offload_set(dhd, dhd_arp_mode);
8898         } else {
8899                 dhd_arp_offload_enable(dhd, FALSE);
8900                 dhd_arp_offload_set(dhd, 0);
8901         }
8902         dhd_arp_enable = arpoe;
8903 #endif /* ARP_OFFLOAD_SUPPORT */
8904
8905 #ifdef PKT_FILTER_SUPPORT
8906         /* Setup default defintions for pktfilter , enable in suspend */
8907         if (dhd_master_mode) {
8908                 dhd->pktfilter_count = 6;
8909                 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
8910                 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
8911                 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8912                 /* apply APP pktfilter */
8913                 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
8914
8915                 /* Setup filter to allow only unicast */
8916                 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
8917
8918                 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
8919                 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
8920
8921 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
8922                 dhd->pktfilter_count = 4;
8923                 /* Setup filter to block broadcast and NAT Keepalive packets */
8924                 /* discard all broadcast packets */
8925                 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
8926                 /* discard NAT Keepalive packets */
8927                 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
8928                 /* discard NAT Keepalive packets */
8929                 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
8930                 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8931 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
8932         } else
8933                 dhd_conf_discard_pkt_filter(dhd);
8934         dhd_conf_add_pkt_filter(dhd);
8935
8936 #if defined(SOFTAP)
8937         if (ap_fw_loaded) {
8938                 dhd_enable_packet_filter(0, dhd);
8939         }
8940 #endif /* defined(SOFTAP) */
8941         dhd_set_packet_filter(dhd);
8942 #endif /* PKT_FILTER_SUPPORT */
8943 #ifdef DISABLE_11N
8944         bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
8945         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8946                 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
8947 #endif /* DISABLE_11N */
8948
8949 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8950         bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
8951         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8952 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8953         /* query for 'ver' to get version info from firmware */
8954         memset(buf, 0, sizeof(buf));
8955         ptr = buf;
8956         bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
8957         if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
8958                 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
8959         else {
8960                 bcmstrtok(&ptr, "\n", 0);
8961                 /* Print fw version info */
8962                 DHD_ERROR(("Firmware version = %s\n", buf));
8963                 strncpy(fw_version, buf, FW_VER_STR_LEN);
8964                 dhd_set_version_info(dhd, buf);
8965 #ifdef WRITE_WLANINFO
8966                 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
8967 #endif /* WRITE_WLANINFO */
8968         }
8969
8970 #if defined(BCMSDIO)
8971         dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
8972         // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
8973         dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE);
8974 #endif /* defined(BCMSDIO) */
8975
8976         dhd_conf_set_disable_proptx(dhd);
8977 #if defined(BCMSDIO)
8978 #ifdef PROP_TXSTATUS
8979         if (disable_proptx ||
8980 #ifdef PROP_TXSTATUS_VSDB
8981                 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
8982                 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
8983                  dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
8984 #endif /* PROP_TXSTATUS_VSDB */
8985                 FALSE) {
8986                 wlfc_enable = FALSE;
8987         }
8988
8989 #ifdef USE_WFA_CERT_CONF
8990         if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
8991                 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
8992                 wlfc_enable = proptx;
8993         }
8994 #endif /* USE_WFA_CERT_CONF */
8995
8996 #ifndef DISABLE_11N
8997         ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8998         bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
8999         if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9000                 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9001                 if (ret2 != BCME_UNSUPPORTED)
9002                         ret = ret2;
9003
9004                 if (ret == BCME_NOTDOWN) {
9005                         uint wl_down = 1;
9006                         ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9007                                 sizeof(wl_down), TRUE, 0);
9008                         DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9009                                 __FUNCTION__, ret2, hostreorder));
9010
9011                         bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9012                                 iovbuf, sizeof(iovbuf));
9013                         ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9014                         DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9015                         if (ret2 != BCME_UNSUPPORTED)
9016                                         ret = ret2;
9017                 }
9018                 if (ret2 != BCME_OK)
9019                         hostreorder = 0;
9020         }
9021 #endif /* DISABLE_11N */
9022
9023
9024         if (wlfc_enable)
9025                 dhd_wlfc_init(dhd);
9026 #ifndef DISABLE_11N
9027         else if (hostreorder)
9028                 dhd_wlfc_hostreorder_init(dhd);
9029 #endif /* DISABLE_11N */
9030
9031 #endif /* PROP_TXSTATUS */
9032 #endif /* BCMSDIO || BCMBUS */
9033 #ifdef PCIE_FULL_DONGLE
9034         /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9035         if (FW_SUPPORTED(dhd, ap)) {
9036                 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9037                 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9038                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9039                         DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9040         }
9041 #endif /* PCIE_FULL_DONGLE */
9042 #ifdef PNO_SUPPORT
9043         if (!dhd->pno_state) {
9044                 dhd_pno_init(dhd);
9045         }
9046 #endif
9047 #ifdef WL11U
9048         dhd_interworking_enable(dhd);
9049 #endif /* WL11U */
9050 #ifndef WL_CFG80211
9051         dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9052 #endif
9053
9054 #ifdef SUPPORT_SENSORHUB
9055         bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9056         if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9057                 FALSE, 0)) < 0) {
9058                 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
9059                         __FUNCTION__, ret));
9060                 dhd->info->shub_enable = 0;
9061         } else {
9062                 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9063                 dhd->info->shub_enable = shub_enable;
9064                 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9065                         __FUNCTION__, dhd->info->shub_enable));
9066         }
9067 #endif /* SUPPORT_SENSORHUB */
9068 done:
9069
9070         if (eventmask_msg)
9071                 kfree(eventmask_msg);
9072         if (iov_buf)
9073                 kfree(iov_buf);
9074
9075         return ret;
9076 }
9077
9078
9079 int
9080 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9081 {
9082         char buf[strlen(name) + 1 + cmd_len];
9083         int len = sizeof(buf);
9084         wl_ioctl_t ioc;
9085         int ret;
9086
9087         len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9088
9089         memset(&ioc, 0, sizeof(ioc));
9090
9091         ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9092         ioc.buf = buf;
9093         ioc.len = len;
9094         ioc.set = set;
9095
9096         ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9097         if (!set && ret >= 0)
9098                 memcpy(cmd_buf, buf, cmd_len);
9099
9100         return ret;
9101 }
9102
9103 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9104 {
9105         struct dhd_info *dhd = dhdp->info;
9106         struct net_device *dev = NULL;
9107
9108         ASSERT(dhd && dhd->iflist[ifidx]);
9109         dev = dhd->iflist[ifidx]->net;
9110         ASSERT(dev);
9111
9112         if (netif_running(dev)) {
9113                 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9114                 return BCME_NOTDOWN;
9115         }
9116
9117 #define DHD_MIN_MTU 1500
9118 #define DHD_MAX_MTU 1752
9119
9120         if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9121                 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9122                 return BCME_BADARG;
9123         }
9124
9125         dev->mtu = new_mtu;
9126         return 0;
9127 }
9128
9129 #ifdef ARP_OFFLOAD_SUPPORT
9130 /* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
9131 void
9132 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9133 {
9134         u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9135         int i;
9136         int ret;
9137
9138         bzero(ipv4_buf, sizeof(ipv4_buf));
9139
9140         /* display what we've got */
9141         ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9142         DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9143 #ifdef AOE_DBG
9144         dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9145 #endif
9146         /* now we saved hoste_ip table, clr it in the dongle AOE */
9147         dhd_aoe_hostip_clr(dhd_pub, idx);
9148
9149         if (ret) {
9150                 DHD_ERROR(("%s failed\n", __FUNCTION__));
9151                 return;
9152         }
9153
9154         for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9155                 if (add && (ipv4_buf[i] == 0)) {
9156                                 ipv4_buf[i] = ipa;
9157                                 add = FALSE; /* added ipa to local table  */
9158                                 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9159                                 __FUNCTION__, i));
9160                 } else if (ipv4_buf[i] == ipa) {
9161                         ipv4_buf[i]     = 0;
9162                         DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9163                                 __FUNCTION__, ipa, i));
9164                 }
9165
9166                 if (ipv4_buf[i] != 0) {
9167                         /* add back host_ip entries from our local cache */
9168                         dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9169                         DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9170                                 __FUNCTION__, ipv4_buf[i], i));
9171                 }
9172         }
9173 #ifdef AOE_DBG
9174         /* see the resulting hostip table */
9175         dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9176         DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9177         dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9178 #endif
9179 }
9180
9181 /*
9182  * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
9183  * whenever there is an event related to an IP address.
9184  * ptr : kernel provided pointer to IP address that has changed
9185  */
9186 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
9187         unsigned long event,
9188         void *ptr)
9189 {
9190         struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
9191
9192         dhd_info_t *dhd;
9193         dhd_pub_t *dhd_pub;
9194         int idx;
9195
9196         if (!dhd_arp_enable)
9197                 return NOTIFY_DONE;
9198         if (!ifa || !(ifa->ifa_dev->dev))
9199                 return NOTIFY_DONE;
9200
9201 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9202         /* Filter notifications meant for non Broadcom devices */
9203         if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
9204             (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
9205 #if defined(WL_ENABLE_P2P_IF)
9206                 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
9207 #endif /* WL_ENABLE_P2P_IF */
9208                         return NOTIFY_DONE;
9209         }
9210 #endif /* LINUX_VERSION_CODE */
9211
9212         dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
9213         if (!dhd)
9214                 return NOTIFY_DONE;
9215
9216         dhd_pub = &dhd->pub;
9217
9218         if (dhd_pub->arp_version == 1) {
9219                 idx = 0;
9220         } else {
9221                 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
9222                         if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
9223                         break;
9224                 }
9225                 if (idx < DHD_MAX_IFS) {
9226                         DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
9227                                 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
9228                 } else {
9229                         DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
9230                         idx = 0;
9231                 }
9232         }
9233
9234         switch (event) {
9235                 case NETDEV_UP:
9236                         DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
9237                                 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9238
9239                         if (dhd->pub.busstate != DHD_BUS_DATA) {
9240                                 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
9241                                 if (dhd->pend_ipaddr) {
9242                                         DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
9243                                                 __FUNCTION__, dhd->pend_ipaddr));
9244                                 }
9245                                 dhd->pend_ipaddr = ifa->ifa_address;
9246                                 break;
9247                         }
9248
9249 #ifdef AOE_IP_ALIAS_SUPPORT
9250                         DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
9251                                 __FUNCTION__));
9252                         aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
9253 #endif /* AOE_IP_ALIAS_SUPPORT */
9254                         break;
9255
9256                 case NETDEV_DOWN:
9257                         DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
9258                                 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9259                         dhd->pend_ipaddr = 0;
9260 #ifdef AOE_IP_ALIAS_SUPPORT
9261                         DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
9262                                 __FUNCTION__));
9263                         aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
9264 #else
9265                         dhd_aoe_hostip_clr(&dhd->pub, idx);
9266                         dhd_aoe_arp_clr(&dhd->pub, idx);
9267 #endif /* AOE_IP_ALIAS_SUPPORT */
9268                         break;
9269
9270                 default:
9271                         DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
9272                                 __func__, ifa->ifa_label, event));
9273                         break;
9274         }
9275         return NOTIFY_DONE;
9276 }
9277 #endif /* ARP_OFFLOAD_SUPPORT */
9278
9279 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
/* Neighbor Discovery Offload: deferred handler */
9281 static void
9282 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9283 {
9284         struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9285         dhd_pub_t       *pub = &((dhd_info_t *)dhd_info)->pub;
9286         int             ret;
9287
9288         if (event != DHD_WQ_WORK_IPV6_NDO) {
9289                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9290                 return;
9291         }
9292
9293         if (!ndo_work) {
9294                 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9295                 return;
9296         }
9297
9298         if (!pub) {
9299                 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9300                 return;
9301         }
9302
9303         if (ndo_work->if_idx) {
9304                 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
9305                 return;
9306         }
9307
9308         switch (ndo_work->event) {
9309                 case NETDEV_UP:
9310                         DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9311                         ret = dhd_ndo_enable(pub, TRUE);
9312                         if (ret < 0) {
9313                                 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9314                         }
9315
9316                         ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9317                         if (ret < 0) {
9318                                 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9319                                         __FUNCTION__, ret));
9320                         }
9321                         break;
9322                 case NETDEV_DOWN:
9323                         DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9324                         ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9325                         if (ret < 0) {
9326                                 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9327                                         __FUNCTION__, ret));
9328                                 goto done;
9329                         }
9330
9331                         ret = dhd_ndo_enable(pub, FALSE);
9332                         if (ret < 0) {
9333                                 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9334                                 goto done;
9335                         }
9336                         break;
9337                 default:
9338                         DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9339                         break;
9340         }
9341 done:
9342         /* free ndo_work. alloced while scheduling the work */
9343         kfree(ndo_work);
9344
9345         return;
9346 }
9347
9348 /*
9349  * Neighbor Discovery Offload: Called when an interface
9350  * is assigned with ipv6 address.
9351  * Handles only primary interface
9352  */
9353 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9354         unsigned long event,
9355         void *ptr)
9356 {
9357         dhd_info_t *dhd;
9358         dhd_pub_t *dhd_pub;
9359         struct inet6_ifaddr *inet6_ifa = ptr;
9360         struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9361         struct ipv6_work_info_t *ndo_info;
9362         int idx = 0; /* REVISIT */
9363
9364 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9365         /* Filter notifications meant for non Broadcom devices */
9366         if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9367                         return NOTIFY_DONE;
9368         }
9369 #endif /* LINUX_VERSION_CODE */
9370
9371         dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9372         if (!dhd)
9373                 return NOTIFY_DONE;
9374
9375         if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9376                 return NOTIFY_DONE;
9377         dhd_pub = &dhd->pub;
9378
9379         if (!FW_SUPPORTED(dhd_pub, ndoe))
9380                 return NOTIFY_DONE;
9381
9382         ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9383         if (!ndo_info) {
9384                 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9385                 return NOTIFY_DONE;
9386         }
9387
9388         ndo_info->event = event;
9389         ndo_info->if_idx = idx;
9390         memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9391
9392         /* defer the work to thread as it may block kernel */
9393         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9394                 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9395         return NOTIFY_DONE;
9396 }
9397 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9398
/*
 * Register the net_device for interface @ifidx with the kernel.
 *
 * Installs the netdev ops (dhd_ops_pri for the primary interface,
 * dhd_ops_virt for virtual ones), chooses a MAC address, registers the
 * device with register_netdev()/register_netdevice() depending on whether
 * the caller already holds rtnl_lock, and — for the primary interface on
 * SDIO/PCIe builds — releases the registration semaphore and powers the
 * chip back down when firmware download at driver load is disabled.
 *
 * @dhdp:           public DHD context; dhdp->info and iflist[ifidx] must
 *                  already be populated by dhd_attach()/dhd_allocate_if().
 * @ifidx:          interface index (0 == primary).
 * @need_rtnl_lock: TRUE to use register_netdev() (takes rtnl itself),
 *                  FALSE to use register_netdevice() (caller holds rtnl).
 *
 * Returns 0 on success or the negative register_netdev* error; on failure
 * the netdev ops are cleared again so a retry starts clean.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* Fallback MAC (Broadcom OUI 00:90:4c) used only if the dongle has not
	 * reported one yet.
	 */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	/* Pre-netdev_ops kernels: wire up the individual function pointers */
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
		 * nor with P2P.  Need to set the Donlge's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
			__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);

	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}



	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
		MAC2STRDBG(dhd->pub.mac.octet));
#else
		MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
	KERNEL_VERSION(2, 6, 27))))
	if (ifidx == 0) {
#ifdef BCMLXSDMMC
		/* Unblock dhd_module_init(), which waits for primary-if registration */
		up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
		if (!dhd_download_fw_on_driverload) {
			/* Firmware is loaded at ifup time instead: reset the bus
			 * and power the chip down until then.
			 */
#ifdef WL_CFG80211
			wl_terminate_event_handler();
#endif /* WL_CFG80211 */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* Undo the ops assignment so a later retry passes the ASSERTs above */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
9537
9538 void
9539 dhd_bus_detach(dhd_pub_t *dhdp)
9540 {
9541         dhd_info_t *dhd;
9542
9543         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9544
9545         if (dhdp) {
9546                 dhd = (dhd_info_t *)dhdp->info;
9547                 if (dhd) {
9548
9549                         /*
9550                          * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9551                          *  calling stop again will cuase SD read/write errors.
9552                          */
9553                         if (dhd->pub.busstate != DHD_BUS_DOWN) {
9554                                 /* Stop the protocol module */
9555                                 dhd_prot_stop(&dhd->pub);
9556
9557                                 /* Stop the bus module */
9558                                 dhd_bus_stop(dhd->pub.bus, TRUE);
9559                         }
9560
9561 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9562                         dhd_bus_oob_intr_unregister(dhdp);
9563 #endif 
9564                 }
9565         }
9566 }
9567
9568
/*
 * Full driver teardown for @dhdp, the inverse of dhd_attach().
 *
 * Order matters throughout: the netdev is closed first, then bus/protocol,
 * then notifiers, interfaces, timers, worker threads, tasklets, and finally
 * wakelocks and flow rings. Each stage is guarded by the corresponding
 * DHD_ATTACH_STATE_* bit so a partially-attached instance tears down only
 * what it actually created.
 */
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;
	struct net_device *dev;

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;

	/* NOTE(review): iflist[0] is dereferenced without a NULL check here,
	 * although the ADD_IF block below treats it as possibly NULL (ASSERT +
	 * 'if (ifp && ifp->net)'). Confirm iflist[0] is always set by the time
	 * dhd_detach() can run.
	 */
	dev = dhd->iflist[0]->net;

	if (dev) {
		rtnl_lock();
		if (dev->flags & IFF_UP) {
			/* If IFF_UP is still up, it indicates that
			 * "ifconfig wlan0 down" hasn't been called.
			 * So invoke dev_close explicitly here to
			 * bring down the interface.
			 */
			DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
			dev_close(dev);
		}
		rtnl_unlock();
	}

	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

#ifdef PROP_TXSTATUS
#ifdef DHD_WLFC_THREAD
	/* Stop the wireless flow-control thread and wake any waiter on its queue */
	if (dhd->pub.wlfc_thread) {
		kthread_stop(dhd->pub.wlfc_thread);
		dhdp->wlfc_thread_go = TRUE;
		wake_up_interruptible(&dhdp->wlfc_wqhead);
	}
	dhd->pub.wlfc_thread = NULL;
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {

		dhd_bus_detach(dhdp);
#ifdef BCMPCIE
		/* On a system reboot, also gate the PCIe clock and cut power */
		if (is_reboot == SYS_RESTART) {
			extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
			if (dhd_wifi_platdata && !dhdp->dongle_reset) {
				dhdpcie_bus_clock_stop(dhdp->bus);
				wifi_platform_set_power(dhd_wifi_platdata->adapters,
					FALSE, WIFI_TURNOFF_DELAY);
			}
		}
#endif /* BCMPCIE */
#ifndef PCIE_FULL_DONGLE
		/* For full-dongle PCIe, protocol detach happens later, after
		 * flow rings are gone (see PCIE_FULL_DONGLE block below).
		 */
		if (dhdp->prot)
			dhd_prot_detach(dhdp);
#endif
	}

	/* Unhook the address/PM notifiers registered at attach time */
#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = FALSE;
		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
	if (dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = FALSE;
		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detatch and unlink in the iw */
		wl_iw_detach();
	}
#endif /* defined(WL_WIRELESS_EXT) */

	/* delete all interfaces, start with virtual  */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i])
				dhd_remove_if(&dhd->pub, i, TRUE);
		}
		dhd_net_if_unlock_local(dhd);

		/*  delete primary interface 0 */
		ifp = dhd->iflist[0];
		ASSERT(ifp);
		ASSERT(ifp->net);
		if (ifp && ifp->net) {



			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				netif_tx_disable(ifp->net);
				unregister_netdev(ifp->net);
			}
			ifp->net = NULL;
#ifdef DHD_WMF
			dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
			bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
				NULL, FALSE, dhdp->tickcnt);
			deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
			ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */

			dhd_if_del_sta_list(ifp);

			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
			dhd->iflist[0] = NULL;
		}
	}

	/* Clear the watchdog timer */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

	/* Stop the worker threads created at attach (watchdog, rxf, dpc, rpm) */
	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
#ifdef DHD_PCIE_RUNTIMEPM
		if (dhd->thr_rpm_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rpm_ctl);
		}
#endif /* DHD_PCIE_RUNTIMEPM */
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}

		/* When no dpc thread was used, dpc ran as a tasklet instead */
		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		} else {
			tasklet_kill(&dhd->tasklet);
#ifdef DHD_LB_RXP
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
		}
	}

#if defined(DHD_LB)
	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_disable(&dhd->tx_compl_tasklet);
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_disable(&dhd->rx_compl_tasklet);
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
	if (dhd->cpu_notifier.notifier_call != NULL)
		unregister_cpu_notifier(&dhd->cpu_notifier);
	dhd_cpumasks_deinit(dhd);
#endif /* DHD_LB */

#ifdef DHD_LOG_DUMP
	dhd_log_dump_deinit(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		wl_cfg80211_detach(NULL);
		dhd_monitor_uninit();
	}
#endif
	/* free deferred work queue */
	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
	dhd->dhd_deferred_wq = NULL;

#ifdef SHOW_LOGTRACE
	/* Free the firmware log-format tables loaded for event tracing */
	if (dhd->event_data.fmts)
		kfree(dhd->event_data.fmts);
	if (dhd->event_data.raw_fmts)
		kfree(dhd->event_data.raw_fmts);
	if (dhd->event_data.raw_sstr)
		kfree(dhd->event_data.raw_sstr);
#endif /* SHOW_LOGTRACE */

#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif
#if defined(CONFIG_PM_SLEEP)
	if (dhd_pm_notifier_registered) {
		unregister_pm_notifier(&dhd->pm_notifier);
		dhd_pm_notifier_registered = FALSE;
	}
#endif /* CONFIG_PM_SLEEP */

#ifdef DEBUG_CPU_FREQ
		if (dhd->new_freq)
			free_percpu(dhd->new_freq);
		dhd->new_freq = NULL;
		cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
		dhd->wakelock_wd_counter = 0;
		wake_lock_destroy(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
	}



#ifdef DHDTCPACK_SUPPRESS
	/* This will free all MEM allocated for TCPACK SUPPRESS */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */

#ifdef PCIE_FULL_DONGLE
		/* Flow rings must be gone before the protocol layer is detached */
		dhd_flow_rings_deinit(dhdp);
		if (dhdp->prot)
			dhd_prot_detach(dhdp);
#endif


	dhd_sysfs_exit(dhd);
	dhd->pub.is_fw_download_done = FALSE;
	dhd_conf_detach(dhdp);
}
9833
9834
/*
 * Free the remaining heap state owned by @dhdp: per-flow AMPDU reorder
 * buffers, the station pool, any captured SoC RAM dump, cached firmware
 * images, and finally the dhd_info structure itself (unless it came from
 * the static preallocation pool). Called after dhd_detach().
 */
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		/* Each reorder buffer is a header plus (max_idx + 1) packet
		 * slots; recompute that size for MFREE accounting.
		 */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);

		dhd = (dhd_info_t *)dhdp->info;
		/* Release a captured SoC RAM dump, via the matching allocator */
		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
#ifdef CACHE_FW_IMAGES
		if (dhdp->cached_fw) {
			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
			dhdp->cached_fw = NULL;
		}

		if (dhdp->cached_nvram) {
			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
			dhdp->cached_nvram = NULL;
		}
#endif
		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
		if (dhd &&
			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
		dhd = NULL;
	}
}
9888
/*
 * Reset @dhdp's dynamic state without freeing dhd_info itself: drop any
 * pending TCP-ACK suppression state, free the per-flow reorder buffers,
 * clear (not destroy) the station pool, and release a captured SoC RAM
 * dump. Used when the dongle is reset but the driver instance survives,
 * in contrast to dhd_free() which tears everything down.
 */
void
dhd_clear(dhd_pub_t *dhdp)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
#ifdef DHDTCPACK_SUPPRESS
		/* Clean up timer/data structure for any remaining/pending packet or timer. */
		dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
		/* Same sizing logic as dhd_free(): header + (max_idx + 1) slots */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);

		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
	}
}
9928
/*
 * Unwind everything dhd_module_init() set up: the bus registration, the
 * Android glue, and the platform-driver registration, in that order.
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

	dhd_bus_unregister();
	wl_android_exit();
	dhd_wifi_platform_unregister_drv();

	printf("%s: Exit\n", __FUNCTION__);
}
9941
9942 static void
9943 dhd_module_exit(void)
9944 {
9945         dhd_buzzz_detach();
9946         dhd_module_cleanup();
9947         unregister_reboot_notifier(&dhd_reboot_notifier);
9948 }
9949
/*
 * Module/driver initialization.
 *
 * Backs up the module-parameter firmware/NVRAM paths (the driver may
 * overwrite the live parameters at runtime), registers the platform
 * driver, and on success hooks the reboot notifier. On each failed
 * attempt the paths are restored from the backups before retrying.
 *
 * Returns 0 on success, or the last dhd_wifi_platform_register_drv()
 * error.
 */
static int
dhd_module_init(void)
{
	int err;
	int retry = 0;	/* NOTE(review): with retry == 0 the do/while below makes
			 * exactly one attempt (retry-- evaluates to 0 == false).
			 * Confirm whether a non-zero retry count was intended.
			 */

	printf("%s: in\n", __FUNCTION__);

	dhd_buzzz_attach();

	DHD_PERIM_RADIO_INIT();


	/* Preserve the caller-supplied paths so failed attempts can restore them */
	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			/* Register the reboot hook only once we are live */
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		}
		else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			/* Restore the original paths before the next attempt */
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	if (err) {
		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
	} else {
		/* Without load-time FW download, init is considered done here */
		if (!dhd_download_fw_on_driverload) {
			dhd_driver_init_done = TRUE;
		}
	}

	printf("%s: Exit err=%d\n", __FUNCTION__, err);
	return err;
}
10000
10001 static int
10002 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
10003 {
10004         DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
10005         if (code == SYS_RESTART) {
10006 #ifdef BCMPCIE
10007                 is_reboot = code;
10008 #endif /* BCMPCIE */
10009         }
10010         return NOTIFY_DONE;
10011 }
10012
/* Kernel-thread trampoline: runs the (slow) driver bring-up off the
 * initcall path so boot is not blocked; @data is unused.
 */
static int wifi_init_thread(void *data)
{
	dhd_module_init();
	return 0;
}
10019
10020 int __init rockchip_wifi_init_module_rkwifi(void)
10021 {
10022         struct task_struct *kthread = NULL;
10023
10024         kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
10025         if (IS_ERR(kthread))
10026                 pr_err("create wifi_init_thread failed.\n");
10027
10028         return 0;
10029 }
10030
10031 void __exit rockchip_wifi_exit_module_rkwifi(void)
10032 {
10033         dhd_module_exit();
10034 }
10035
/* Registration runs at late_initcall so that the MMC/PCIe host controllers
 * this driver depends on are already probed.
 */
late_initcall(rockchip_wifi_init_module_rkwifi);
module_exit(rockchip_wifi_exit_module_rkwifi);

/* NOTE(review): the alternative registration variants below are dead code
 * under '#if 0', kept from the upstream Broadcom driver for reference.
 * Consider deleting them once the Rockchip entry points above are settled.
 */
#if 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
	defined(CONFIG_ARCH_MSM8996)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
	* CONFIG_ARCH_MSM8996
	*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);

#endif
10062 /*
10063  * OS specific functions required to implement DHD driver in OS independent way
10064  */
10065 int
10066 dhd_os_proto_block(dhd_pub_t *pub)
10067 {
10068         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10069
10070         if (dhd) {
10071                 DHD_PERIM_UNLOCK(pub);
10072
10073                 down(&dhd->proto_sem);
10074
10075                 DHD_PERIM_LOCK(pub);
10076                 return 1;
10077         }
10078
10079         return 0;
10080 }
10081
10082 int
10083 dhd_os_proto_unblock(dhd_pub_t *pub)
10084 {
10085         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10086
10087         if (dhd) {
10088                 up(&dhd->proto_sem);
10089                 return 1;
10090         }
10091
10092         return 0;
10093 }
10094
10095 void
10096 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10097 {
10098         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10099
10100         if (dhd) {
10101                 mutex_lock(&dhd->dhd_iovar_mutex);
10102         }
10103 }
10104
10105 void
10106 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10107 {
10108         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10109
10110         if (dhd) {
10111                 mutex_unlock(&dhd->dhd_iovar_mutex);
10112         }
10113 }
10114
10115 unsigned int
10116 dhd_os_get_ioctl_resp_timeout(void)
10117 {
10118         return ((unsigned int)dhd_ioctl_timeout_msec);
10119 }
10120
10121 void
10122 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10123 {
10124         dhd_ioctl_timeout_msec = (int)timeout_msec;
10125 }
10126
10127 int
10128 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
10129 {
10130         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10131         int timeout;
10132
10133         /* Convert timeout in millsecond to jiffies */
10134 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10135         timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10136 #else
10137         timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10138 #endif
10139
10140         DHD_PERIM_UNLOCK(pub);
10141
10142         timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
10143
10144         DHD_PERIM_LOCK(pub);
10145
10146         return timeout;
10147 }
10148
10149 int
10150 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10151 {
10152         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10153
10154         wake_up(&dhd->ioctl_resp_wait);
10155         return 0;
10156 }
10157
/*
 * Sleep on the d3ack waitqueue until *condition becomes non-zero or the
 * IOCTL response timeout expires (the same dhd_ioctl_timeout_msec
 * budget is reused here).  Returns 0 on timeout, otherwise the
 * remaining jiffies.  The PERIM lock is dropped across the sleep.
 * NOTE(review): presumably used for the PCIe D3 acknowledgement
 * handshake — confirm against the bus layer callers.
 */
int
dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10179
10180 int
10181 dhd_os_d3ack_wake(dhd_pub_t *pub)
10182 {
10183         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10184
10185         wake_up(&dhd->d3ack_wait);
10186         return 0;
10187 }
10188
/*
 * Block until *condition becomes zero (i.e. the bus-busy state clears)
 * or DHD_BUS_BUSY_TIMEOUT expires.  Returns 0 on timeout, otherwise
 * the remaining jiffies (wait_event_timeout() semantics).
 */
int
dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Wait for bus usage contexts to gracefully exit within some timeout value
	 * Set time out to little higher than dhd_ioctl_timeout_msec,
	 * so that IOCTL timeout should not get affected.
	 */
	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
#else
	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);

	return timeout;
}
10210
/*
 * Wake any waiter in dhd_os_busbusy_wait_negation().  The SMP write
 * barrier makes the caller's update of the bus-busy variable visible
 * before the woken thread re-evaluates its wait condition.
 * Always returns 0.
 */
int INLINE
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	/* Call wmb() to make sure before waking up the other event value gets updated */
	OSL_SMP_WMB();
	wake_up(&dhd->dhd_bus_busy_state_wait);
	return 0;
}
10220
10221 void
10222 dhd_os_wd_timer_extend(void *bus, bool extend)
10223 {
10224         dhd_pub_t *pub = bus;
10225         dhd_info_t *dhd = (dhd_info_t *)pub->info;
10226
10227         if (extend)
10228                 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10229         else
10230                 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10231 }
10232
10233
/*
 * Start, re-arm or stop the DHD watchdog timer.
 *   wdtick != 0 : (re)arm the timer wdtick ms from now and take an
 *                 extra watchdog wake-lock reference while it runs.
 *   wdtick == 0 : stop the timer (synchronously) and release the
 *                 watchdog wake-lock.
 * The timer is never started while the bus is down, since the
 * watchdog must not run before firmware is loaded.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	/* Hold a wake-lock reference for the duration of the update;
	 * the general lock protects wd_timer_valid.
	 */
	DHD_OS_WD_WAKE_LOCK(pub);
	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		/* NOTE(review): the entry wake-lock reference is only
		 * dropped here on the stop path (wdtick == 0) — verify
		 * this balances with DHD_OS_WD_WAKE_UNLOCK's refcount
		 * semantics for the wdtick != 0 case.
		 */
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		/* Drop the spinlock before del_timer_sync(), which may
		 * wait for a running timer callback to finish.
		 */
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		/* Extra reference held while the watchdog is active */
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(pub);
}
10278
10279 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Arm (tick != 0) or cancel (tick == 0) the PCIe runtime-PM timer.
 * The timer is never started while the bus is down or going down, and
 * is left untouched when it is already in the requested state.
 * rpm_timer_valid is protected by the general lock; the lock is
 * released before del_timer_sync(), as the inline comments note.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN ||
			pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;

}
10327
10328 #endif /* DHD_PCIE_RUNTIMEPM */
10329
10330 void *
10331 dhd_os_open_image(char *filename)
10332 {
10333         struct file *fp;
10334         int size;
10335
10336         fp = filp_open(filename, O_RDONLY, 0);
10337         /*
10338          * 2.6.11 (FC4) supports filp_open() but later revs don't?
10339          * Alternative:
10340          * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10341          * ???
10342          */
10343          if (IS_ERR(fp)) {
10344                  fp = NULL;
10345                  goto err;
10346          }
10347
10348          if (!S_ISREG(file_inode(fp)->i_mode)) {
10349                  DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10350                  fp = NULL;
10351                  goto err;
10352          }
10353
10354          size = i_size_read(file_inode(fp));
10355          if (size <= 0) {
10356                  DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10357                  fp = NULL;
10358                  goto err;
10359          }
10360
10361          DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10362
10363 err:
10364          return fp;
10365 }
10366
/*
 * Read up to len bytes from the image handle into buf, advancing the
 * file position on success.  Returns the number of bytes read, 0 for a
 * NULL handle, or -EIO when a request that should have covered the
 * whole file came back short (truncated/corrupt image).
 * NOTE(review): uses the pre-4.14 kernel_read(file, offset, buf, count)
 * signature — confirm against the target kernel version.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image)
		return 0;

	size = i_size_read(file_inode(fp));
	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));

	/* caller asked for at least the whole file but got less: error */
	if (len >= size && size != rdlen) {
		return -EIO;
	}

	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}
10389
10390 void
10391 dhd_os_close_image(void *image)
10392 {
10393         if (image)
10394                 filp_close((struct file *)image, NULL);
10395 }
10396
10397 void
10398 dhd_os_sdlock(dhd_pub_t *pub)
10399 {
10400         dhd_info_t *dhd;
10401
10402         dhd = (dhd_info_t *)(pub->info);
10403
10404         if (dhd_dpc_prio >= 0)
10405                 down(&dhd->sdsem);
10406         else
10407                 spin_lock_bh(&dhd->sdlock);
10408 }
10409
10410 void
10411 dhd_os_sdunlock(dhd_pub_t *pub)
10412 {
10413         dhd_info_t *dhd;
10414
10415         dhd = (dhd_info_t *)(pub->info);
10416
10417         if (dhd_dpc_prio >= 0)
10418                 up(&dhd->sdsem);
10419         else
10420                 spin_unlock_bh(&dhd->sdlock);
10421 }
10422
10423 void
10424 dhd_os_sdlock_txq(dhd_pub_t *pub)
10425 {
10426         dhd_info_t *dhd;
10427
10428         dhd = (dhd_info_t *)(pub->info);
10429         spin_lock_bh(&dhd->txqlock);
10430 }
10431
10432 void
10433 dhd_os_sdunlock_txq(dhd_pub_t *pub)
10434 {
10435         dhd_info_t *dhd;
10436
10437         dhd = (dhd_info_t *)(pub->info);
10438         spin_unlock_bh(&dhd->txqlock);
10439 }
10440
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
	/* Intentionally empty: no dedicated RX-queue lock is used in this
	 * build; the stub keeps the OSL lock API symmetric.
	 */
}
10445
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
	/* Intentionally empty: counterpart of the dhd_os_sdlock_rxq() stub. */
}
10450
10451 static void
10452 dhd_os_rxflock(dhd_pub_t *pub)
10453 {
10454         dhd_info_t *dhd;
10455
10456         dhd = (dhd_info_t *)(pub->info);
10457         spin_lock_bh(&dhd->rxf_lock);
10458
10459 }
10460
10461 static void
10462 dhd_os_rxfunlock(dhd_pub_t *pub)
10463 {
10464         dhd_info_t *dhd;
10465
10466         dhd = (dhd_info_t *)(pub->info);
10467         spin_unlock_bh(&dhd->rxf_lock);
10468 }
10469
10470 #ifdef DHDTCPACK_SUPPRESS
/*
 * Take the TCP-ACK suppression lock.
 * BCMSDIO builds use a BH spinlock and the returned flags value is
 * always 0 (unused); other builds use an IRQ-saving spinlock and the
 * caller must hand the returned flags back to dhd_os_tcpackunlock().
 */
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}
10489
10490 void
10491 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
10492 {
10493         dhd_info_t *dhd;
10494
10495 #ifdef BCMSDIO
10496         BCM_REFERENCE(flags);
10497 #endif /* BCMSDIO */
10498
10499         dhd = (dhd_info_t *)(pub->info);
10500
10501         if (dhd) {
10502 #ifdef BCMSDIO
10503                 spin_lock_bh(&dhd->tcpack_lock);
10504 #else
10505                 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
10506 #endif /* BCMSDIO */
10507         }
10508 }
10509 #endif /* DHDTCPACK_SUPPRESS */
10510
10511 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10512 {
10513         uint8* buf;
10514         gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10515
10516         buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10517         if (buf == NULL && kmalloc_if_fail)
10518                 buf = kmalloc(size, flags);
10519
10520         return buf;
10521 }
10522
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
	/* Intentionally empty: preallocated buffers appear to be owned by
	 * the platform layer, so there is nothing to free here — verify
	 * against wifi_platform_prealloc() ownership rules.
	 */
}
10526
10527 #if defined(WL_WIRELESS_EXT)
10528 struct iw_statistics *
10529 dhd_get_wireless_stats(struct net_device *dev)
10530 {
10531         int res = 0;
10532         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10533
10534         if (!dhd->pub.up) {
10535                 return NULL;
10536         }
10537
10538         res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
10539
10540         if (res == 0)
10541                 return &dhd->iw.wstats;
10542         else
10543                 return NULL;
10544 }
10545 #endif /* defined(WL_WIRELESS_EXT) */
10546
/*
 * Parse a firmware event from pktdata and dispatch it to the
 * interested subsystems.  wl_host_event() decodes the raw packet into
 * *event / *data (and may rewrite *ifidx); on success the event is
 * forwarded to wireless-extensions (primary bsscfg only) and/or
 * cfg80211, whichever are compiled in.  Returns BCME_OK or the
 * wl_host_event() error code.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
	ASSERT(dhd != NULL);

#ifdef SHOW_LOGTRACE
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[*ifidx] != NULL);
		ASSERT(dhd->iflist[*ifidx]->net != NULL);

		/* guard the net pointer even though the ASSERT above
		 * checks it: ASSERTs compile out in release builds
		 */
		if (dhd->iflist[*ifidx]->net) {
			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT)  */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
10587
/* send up locally generated event */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	/* No locally generated event types are handled in this build;
	 * the switch is a placeholder for per-event dispatch.
	 */
	switch (ntoh32(event->event_type)) {

	default:
		break;
	}
}
10598
10599 #ifdef LOG_INTO_TCPDUMP
/*
 * Wrap a log blob in a BRCM ethertype frame and inject it into the
 * network stack so it shows up in tcpdump captures.  The frame is
 * addressed from/to the device's own MAC (source locally toggled),
 * delivered on the "wlan0" interface when present, else interface 0.
 * Allocation failures are logged and the blob is dropped.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* Build the synthetic header: dst = our MAC, src = our
		 * MAC with the locally-administered bit toggled.
		 */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		/* header followed by the raw log payload */
		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			 ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		/* eth_type_trans() advances skb->data; restore the saved
		 * pointer/length so the full frame is visible upstream
		 */
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
10657 #endif /* LOG_INTO_TCPDUMP */
10658
/*
 * SDIO only: sleep until *lockvar clears or IOCTL_RESP_TIMEOUT
 * expires.  The SDIO bus lock is released around the sleep so the
 * waker (dhd_wait_event_wakeup()) can make progress, and re-taken
 * before returning.  No-op on non-SDIO builds.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
10676
10677 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
10678 {
10679 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10680         struct dhd_info *dhdinfo =  dhd->info;
10681         if (waitqueue_active(&dhdinfo->ctrl_wait))
10682                 wake_up(&dhdinfo->ctrl_wait);
10683 #endif
10684         return;
10685 }
10686
10687 #if defined(BCMSDIO) || defined(BCMPCIE)
10688 int
10689 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
10690 {
10691         int ret;
10692
10693         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10694
10695         if (flag == TRUE) {
10696                 /* Issue wl down command before resetting the chip */
10697                 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
10698                         DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
10699                 }
10700 #ifdef PROP_TXSTATUS
10701                 if (dhd->pub.wlfc_enabled)
10702                         dhd_wlfc_deinit(&dhd->pub);
10703 #endif /* PROP_TXSTATUS */
10704 #ifdef PNO_SUPPORT
10705         if (dhd->pub.pno_state)
10706                 dhd_pno_deinit(&dhd->pub);
10707 #endif
10708         }
10709
10710 #ifdef BCMSDIO
10711         if (!flag) {
10712                 dhd_update_fw_nv_path(dhd);
10713                 /* update firmware and nvram path to sdio bus */
10714                 dhd_bus_update_fw_nv_path(dhd->pub.bus,
10715                         dhd->fw_path, dhd->nv_path, dhd->conf_path);
10716         }
10717 #endif /* BCMSDIO */
10718
10719         ret = dhd_bus_devreset(&dhd->pub, flag);
10720         if (ret) {
10721                 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
10722                 return ret;
10723         }
10724
10725         return ret;
10726 }
10727
10728 #ifdef BCMSDIO
10729 int
10730 dhd_net_bus_suspend(struct net_device *dev)
10731 {
10732         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10733         return dhd_bus_suspend(&dhd->pub);
10734 }
10735
10736 int
10737 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10738 {
10739         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10740         return dhd_bus_resume(&dhd->pub, stage);
10741 }
10742
10743 #endif /* BCMSDIO */
10744 #endif /* BCMSDIO || BCMPCIE */
10745
10746 int net_os_set_suspend_disable(struct net_device *dev, int val)
10747 {
10748         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10749         int ret = 0;
10750
10751         if (dhd) {
10752                 ret = dhd->pub.suspend_disable_flag;
10753                 dhd->pub.suspend_disable_flag = val;
10754         }
10755         return ret;
10756 }
10757
/*
 * Enter (val != 0) or leave (val == 0) the driver's suspend handling
 * for this interface.  Early-suspend builds use the dedicated
 * dhd_set_suspend() path; otherwise the generic helper runs (force
 * bypasses its checks).  The cfg80211 power mode is refreshed
 * afterwards.  Returns the helper's result, or 0 without a DHD context.
 */
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
		/* empty platform hook block kept as-is from upstream */
#ifdef CONFIG_MACH_UNIVERSAL7420
#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif
	}
	return ret;
}
10777
10778 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10779 {
10780         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10781
10782         if (dhd)
10783                 dhd->pub.suspend_bcn_li_dtim = val;
10784
10785         return 0;
10786 }
10787
10788 #ifdef PKT_FILTER_SUPPORT
/*
 * Install (add_remove != 0) or remove one of the predefined RX packet
 * filters (broadcast / IPv4 multicast / IPv6 multicast / mDNS) in the
 * dongle.  In non-master mode the sense of add_remove is inverted.
 * The unicast slot is never touched here.  Returns 0 on success or
 * -EINVAL for an unknown/out-of-range filter number.  Compiled out
 * (always 0) for GAN lite builds.
 */
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
	return 0;
#else
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	char *filterp = NULL;
	int filter_id = 0;
	int ret = 0;

	if (!dhd_master_mode)
		add_remove = !add_remove;
	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
		return ret;
	if (num >= dhd->pub.pktfilter_count)
		return -EINVAL;
	/* filter strings are "<id> <offset> <polarity> <offset> <mask> <pattern>" */
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			filter_id = 102;
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filterp = "103 0 0 0 0xFFFF 0x3333";
			filter_id = 103;
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhd->pub.pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
	} else { /* Delete filter */
		if (dhd->pub.pktfilter[num] != NULL) {
			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
			dhd->pub.pktfilter[num] = NULL;
		}
	}
	return ret;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
}
10840
10841 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
10842
10843 {
10844         int ret = 0;
10845
10846         /* Packet filtering is set only if we still in early-suspend and
10847          * we need either to turn it ON or turn it OFF
10848          * We can always turn it OFF in case of early-suspend, but we turn it
10849          * back ON only if suspend_disable_flag was not set
10850         */
10851         if (dhdp && dhdp->up) {
10852                 if (dhdp->in_suspend) {
10853                         if (!val || (val && !dhdp->suspend_disable_flag))
10854                                 dhd_enable_packet_filter(val, dhdp);
10855                 }
10856         }
10857         return ret;
10858 }
10859
10860 /* function to enable/disable packet for Network device */
10861 int net_os_enable_packet_filter(struct net_device *dev, int val)
10862 {
10863         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10864
10865         DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
10866         return dhd_os_enable_packet_filter(&dhd->pub, val);
10867 }
10868 #endif /* PKT_FILTER_SUPPORT */
10869
10870 int
10871 dhd_dev_init_ioctl(struct net_device *dev)
10872 {
10873         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10874         int ret;
10875
10876         if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
10877                 goto done;
10878
10879 done:
10880         return ret;
10881 }
10882
/*
 * Build the Android wifi-HAL feature bitmap for this device from the
 * firmware capability flags plus compile-time options.  Returns 0 when
 * the DHD context is missing.
 */
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	/* NOTE(review): intr_width is declared but never used in this
	 * function; the macro may be consumed elsewhere in the file, so
	 * neither is removed here.
	 */
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */
	if (!dhd)
		return feature_set;

	/* capabilities advertised by the firmware */
	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essentail for d2d rtt */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
	/* compile-time driver features */
#ifdef RTT_SUPPORT
	feature_set |= WIFI_FEATURE_D2AP_RTT;
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */
	/* Supports STA + STA always */
	feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
#ifdef PNO_SUPPORT
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
		feature_set |= WIFI_FEATURE_BATCH_SCAN;
#ifdef GSCAN_SUPPORT
		feature_set |= WIFI_FEATURE_GSCAN;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
	return feature_set;
}
10939
10940
10941 int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
10942 {
10943         int feature_set_full, mem_needed;
10944         int *ret;
10945
10946         *num = 0;
10947         mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10948         ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
10949         if (!ret) {
10950                 DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
10951                         mem_needed));
10952                 return ret;
10953         }
10954
10955         feature_set_full = dhd_dev_get_feature_set(dev);
10956
10957         ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
10958                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10959                  (feature_set_full & WIFI_FEATURE_NAN) |
10960                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10961                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10962                  (feature_set_full & WIFI_FEATURE_PNO) |
10963                  (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
10964                  (feature_set_full & WIFI_FEATURE_GSCAN) |
10965                  (feature_set_full & WIFI_FEATURE_HOTSPOT) |
10966                  (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
10967                  (feature_set_full & WIFI_FEATURE_EPR);
10968
10969         ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
10970                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10971                  /* Not yet verified NAN with P2P */
10972                  /* (feature_set_full & WIFI_FEATURE_NAN) | */
10973                  (feature_set_full & WIFI_FEATURE_P2P) |
10974                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10975                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10976                  (feature_set_full & WIFI_FEATURE_EPR);
10977
10978         ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
10979                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10980                  (feature_set_full & WIFI_FEATURE_NAN) |
10981                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10982                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10983                  (feature_set_full & WIFI_FEATURE_TDLS) |
10984                  (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
10985                  (feature_set_full & WIFI_FEATURE_EPR);
10986         *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10987
10988         return ret;
10989 }
10990 #ifdef CUSTOM_FORCE_NODFS_FLAG
10991 int
10992 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
10993 {
10994         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10995
10996         if (nodfs)
10997                 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
10998         else
10999                 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
11000         dhd->pub.force_country_change = TRUE;
11001         return 0;
11002 }
11003 #endif /* CUSTOM_FORCE_NODFS_FLAG */
11004 #ifdef PNO_SUPPORT
11005 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
11006 int
11007 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
11008 {
11009         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11010
11011         return (dhd_pno_stop_for_ssid(&dhd->pub));
11012 }
11013 /* Linux wrapper to call common dhd_pno_set_for_ssid */
11014 int
11015 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
11016         uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
11017 {
11018         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11019
11020         return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
11021                 pno_repeat, pno_freq_expo_max, channel_list, nchan));
11022 }
11023
11024 /* Linux wrapper to call common dhd_pno_enable */
11025 int
11026 dhd_dev_pno_enable(struct net_device *dev, int enable)
11027 {
11028         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11029
11030         return (dhd_pno_enable(&dhd->pub, enable));
11031 }
11032
11033 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
11034 int
11035 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
11036         struct dhd_pno_hotlist_params *hotlist_params)
11037 {
11038         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11039         return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
11040 }
11041 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
11042 int
11043 dhd_dev_pno_stop_for_batch(struct net_device *dev)
11044 {
11045         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11046         return (dhd_pno_stop_for_batch(&dhd->pub));
11047 }
11048 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
11049 int
11050 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
11051 {
11052         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11053         return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
11054 }
11055 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
11056 int
11057 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
11058 {
11059         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11060         return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
11061 }
11062 /* Linux wrapper to call common dhd_pno_set_mac_oui */
11063 int
11064 dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
11065 {
11066         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11067         return (dhd_pno_set_mac_oui(&dhd->pub, oui));
11068 }
11069 #endif /* PNO_SUPPORT */
11070
11071 #if defined(PNO_SUPPORT)
11072 #ifdef GSCAN_SUPPORT
11073 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11074 int
11075 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11076  void *buf, uint8 flush)
11077 {
11078         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11079
11080         return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
11081 }
11082
11083 /* Linux wrapper to call common dhd_pno_get_gscan */
11084 void *
11085 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11086                       void *info, uint32 *len)
11087 {
11088         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11089
11090         return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
11091 }
11092
11093 /* Linux wrapper to call common dhd_wait_batch_results_complete */
11094 void
11095 dhd_dev_wait_batch_results_complete(struct net_device *dev)
11096 {
11097         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11098
11099         return (dhd_wait_batch_results_complete(&dhd->pub));
11100 }
11101
11102 /* Linux wrapper to call common dhd_pno_lock_batch_results */
11103 void
11104 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
11105 {
11106         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11107
11108         return (dhd_pno_lock_batch_results(&dhd->pub));
11109 }
11110 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
11111 void
11112 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
11113 {
11114         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11115
11116         return (dhd_pno_unlock_batch_results(&dhd->pub));
11117 }
11118
11119 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
11120 int
11121 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
11122 {
11123         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11124
11125         return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
11126 }
11127
11128 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
11129 int
11130 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
11131 {
11132         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11133
11134         return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
11135 }
11136
11137 /* Linux wrapper to call common dhd_handle_swc_evt */
11138 void *
11139 dhd_dev_swc_scan_event(struct net_device *dev, const void  *data, int *send_evt_bytes)
11140 {
11141         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11142
11143         return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
11144 }
11145
11146 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
11147 void *
11148 dhd_dev_hotlist_scan_event(struct net_device *dev,
11149       const void  *data, int *send_evt_bytes, hotlist_type_t type)
11150 {
11151         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11152
11153         return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
11154 }
11155
11156 /* Linux wrapper to call common dhd_process_full_gscan_result */
11157 void *
11158 dhd_dev_process_full_gscan_result(struct net_device *dev,
11159 const void  *data, int *send_evt_bytes)
11160 {
11161         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11162
11163         return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
11164 }
11165
11166 void
11167 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
11168 {
11169         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11170
11171         dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
11172
11173         return;
11174 }
11175
11176 int
11177 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
11178 {
11179         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11180
11181         return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
11182 }
11183
11184 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
11185 int
11186 dhd_dev_retrieve_batch_scan(struct net_device *dev)
11187 {
11188         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11189
11190         return (dhd_retreive_batch_scan_results(&dhd->pub));
11191 }
11192 #endif /* GSCAN_SUPPORT */
11193 #endif 
11194 #ifdef RTT_SUPPORT
11195 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11196 int
11197 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
11198 {
11199         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11200
11201         return (dhd_rtt_set_cfg(&dhd->pub, buf));
11202 }
11203 int
11204 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
11205 {
11206         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11207
11208         return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
11209 }
11210 int
11211 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
11212 {
11213         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11214
11215         return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
11216 }
11217 int
11218 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
11219 {
11220         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11221
11222         return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
11223 }
11224
11225 int
11226 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
11227 {
11228         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11229
11230         return (dhd_rtt_capability(&dhd->pub, capa));
11231 }
11232
11233 #endif /* RTT_SUPPORT */
11234
11235 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11236 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
11237 {
11238         dhd_info_t *dhd;
11239         struct net_device *dev;
11240
11241         dhd = (dhd_info_t *)dhd_info;
11242         dev = dhd->iflist[0]->net;
11243
11244         if (dev) {
11245 #if defined(WL_WIRELESS_EXT)
11246                 wl_iw_send_priv_event(dev, "HANG");
11247 #endif
11248 #if defined(WL_CFG80211)
11249                 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11250 #endif
11251         }
11252 }
11253
11254 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
11255 extern dhd_pub_t *link_recovery;
/* Exported hook for the Exynos PCIe RC driver: on a detected PCIe
 * link-down, record the hang reason, mark the bus as link-down, and push
 * a HANG event to userspace.  Operates on the globally registered
 * link_recovery dhd_pub_t (extern above).
 */
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
11264 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11265
11266 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11267 {
11268         int ret = 0;
11269         if (dhdp) {
11270                 if (!dhdp->hang_was_sent) {
11271                         dhdp->hang_was_sent = 1;
11272                         dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11273                                 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11274                         DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d s=%d\n", __FUNCTION__,
11275                                 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11276                 }
11277         }
11278         return ret;
11279 }
11280
11281 int net_os_send_hang_message(struct net_device *dev)
11282 {
11283         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11284         int ret = 0;
11285
11286         if (dhd) {
11287                 /* Report FW problem when enabled */
11288                 if (dhd->pub.hang_report) {
11289 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11290                         ret = dhd_os_send_hang_message(&dhd->pub);
11291 #else
11292                         ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11293 #endif
11294                 } else {
11295                         DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
11296                                 __FUNCTION__));
11297                         /* Enforce bus down to stop any future traffic */
11298                         dhd->pub.busstate = DHD_BUS_DOWN;
11299                 }
11300         }
11301         return ret;
11302 }
11303
11304 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11305 {
11306         dhd_info_t *dhd = NULL;
11307         dhd_pub_t *dhdp = NULL;
11308         int reason;
11309
11310         dhd = DHD_DEV_INFO(dev);
11311         if (dhd) {
11312                 dhdp = &dhd->pub;
11313         }
11314
11315         if (!dhd || !dhdp) {
11316                 return 0;
11317         }
11318
11319         reason = bcm_strtoul(string_num, NULL, 0);
11320         DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11321
11322         if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11323                 reason = 0;
11324         }
11325
11326         dhdp->hang_reason = reason;
11327
11328         return net_os_send_hang_message(dev);
11329 }
11330 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
11331
11332
11333 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11334 {
11335         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11336         return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11337 }
11338
11339 bool dhd_force_country_change(struct net_device *dev)
11340 {
11341         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11342
11343         if (dhd && dhd->pub.up)
11344                 return dhd->pub.force_country_change;
11345         return FALSE;
11346 }
/* Translate an ISO country code into the platform's customized country
 * spec (ccode/regrev) via the platform glue.  CUSTOM_COUNTRY_CODE builds
 * additionally pass the dhd_cflags customization flags through.
 */
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef CUSTOM_COUNTRY_CODE
	get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
	get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
}
/* Cache the country spec just applied to the dongle and, on cfg80211
 * builds, refresh the wiphy band/channel information to match.  No-op
 * unless the interface is up.
 */
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, notify);
#endif
	}
}
11368
/* Refresh wiphy band information after a band change.
 * NOTE(review): the `band` argument is not used in this body; the new
 * band setting is presumably picked up inside wl_update_wiphybands() --
 * confirm against callers before relying on it.
 */
void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, true);
#endif
	}
}
11378
11379 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11380 {
11381         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11382
11383         if (!fw || fw[0] == '\0')
11384                 return -EINVAL;
11385
11386         strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11387         dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11388
11389 #if defined(SOFTAP)
11390         if (strstr(fw, "apsta") != NULL) {
11391                 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11392                 ap_fw_loaded = TRUE;
11393         } else {
11394                 DHD_INFO(("GOT STA FIRMWARE\n"));
11395                 ap_fw_loaded = FALSE;
11396         }
11397 #endif 
11398         return 0;
11399 }
11400
/* Take the per-adapter net-interface mutex for this device */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
11406
/* Release the per-adapter net-interface mutex for this device */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
11412
/* Serialize network-interface state changes on this adapter.  Compiles to
 * a no-op on pre-2.6.25 kernels, where the mutex member is not built.
 */
static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
#endif
}
11420
/* Counterpart of dhd_net_if_lock_local(); no-op on pre-2.6.25 kernels */
static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
#endif
}
11428
/* Serialize suspend/resume handling against other driver paths.
 * No-op on pre-2.6.25 kernels, where the mutex member is not built.
 */
static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif
}
11437
/* Counterpart of dhd_suspend_lock(); no-op on pre-2.6.25 kernels */
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif
}
11446
11447 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11448 {
11449         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11450         unsigned long flags = 0;
11451
11452         if (dhd)
11453                 spin_lock_irqsave(&dhd->dhd_lock, flags);
11454
11455         return flags;
11456 }
11457
11458 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11459 {
11460         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11461
11462         if (dhd)
11463                 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11464 }
11465
11466 /* Linux specific multipurpose spinlock API */
/* Allocate and initialize a standalone spinlock for generic driver use.
 * Returns NULL if the allocation fails; release with
 * dhd_os_spin_lock_deinit().
 */
void *
dhd_os_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
	/* and this results in kernel asserts in internal builds */
	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}
11478 void
11479 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11480 {
11481         if (lock)
11482                 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11483 }
11484 unsigned long
11485 dhd_os_spin_lock(void *lock)
11486 {
11487         unsigned long flags = 0;
11488
11489         if (lock)
11490                 spin_lock_irqsave((spinlock_t *)lock, flags);
11491
11492         return flags;
11493 }
11494 void
11495 dhd_os_spin_unlock(void *lock, unsigned long flags)
11496 {
11497         if (lock)
11498                 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11499 }
11500
11501 static int
11502 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11503 {
11504         return (atomic_read(&dhd->pend_8021x_cnt));
11505 }
11506
11507 #define MAX_WAIT_FOR_8021X_TX   100
11508
11509 int
11510 dhd_wait_pend8021x(struct net_device *dev)
11511 {
11512         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11513         int timeout = msecs_to_jiffies(10);
11514         int ntimes = MAX_WAIT_FOR_8021X_TX;
11515         int pend = dhd_get_pend_8021x_cnt(dhd);
11516
11517         while (ntimes && pend) {
11518                 if (pend) {
11519                         set_current_state(TASK_INTERRUPTIBLE);
11520                         DHD_PERIM_UNLOCK(&dhd->pub);
11521                         schedule_timeout(timeout);
11522                         DHD_PERIM_LOCK(&dhd->pub);
11523                         set_current_state(TASK_RUNNING);
11524                         ntimes--;
11525                 }
11526                 pend = dhd_get_pend_8021x_cnt(dhd);
11527         }
11528         if (ntimes == 0)
11529         {
11530                 atomic_set(&dhd->pend_8021x_cnt, 0);
11531                 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11532         }
11533         return pend;
11534 }
11535
11536 #ifdef DHD_DEBUG
11537 static void
11538 dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11539 {
11540         char *type_str = NULL;
11541
11542         switch (type) {
11543                 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11544                         type_str = "resumed_on_timeout";
11545                         break;
11546                 case DUMP_TYPE_D3_ACK_TIMEOUT:
11547                         type_str = "D3_ACK_timeout";
11548                         break;
11549                 case DUMP_TYPE_DONGLE_TRAP:
11550                         type_str = "Dongle_Trap";
11551                         break;
11552                 case DUMP_TYPE_MEMORY_CORRUPTION:
11553                         type_str = "Memory_Corruption";
11554                         break;
11555                 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11556                         type_str = "PKTID_AUDIT_Fail";
11557                         break;
11558                 case DUMP_TYPE_SCAN_TIMEOUT:
11559                         type_str = "SCAN_timeout";
11560                         break;
11561                 case DUMP_TYPE_SCAN_BUSY:
11562                         type_str = "SCAN_Busy";
11563                         break;
11564                 case DUMP_TYPE_BY_SYSDUMP:
11565                         type_str = "BY_SYSDUMP";
11566                         break;
11567                 case DUMP_TYPE_BY_LIVELOCK:
11568                         type_str = "BY_LIVELOCK";
11569                         break;
11570                 case DUMP_TYPE_AP_LINKUP_FAILURE:
11571                         type_str = "BY_AP_LINK_FAILURE";
11572                         break;
11573                 default:
11574                         type_str = "Unknown_type";
11575                         break;
11576         }
11577
11578         strncpy(buf, type_str, strlen(type_str));
11579         buf[strlen(type_str)] = 0;
11580 }
11581
/* Dump `size` bytes of SOCRAM image `buf` to a timestamped file whose
 * location and open flags depend on the customer build flavor.
 * Frees `buf` on all paths (static-buf or MFREE, per build).  Returns 0
 * on success or the filp_open() error code.
 *
 * NOTE(review): the fp->f_op->write() return value is ignored, so a short
 * or failed write is not reported.  Also "%ld" is paired with an
 * (unsigned long) cast -- works in practice but is not a matching
 * conversion specifier.
 */
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char memdump_path[128];
	char memdump_type[32];
	struct timeval curtime;
	uint32 file_mode;

	/* change to KERNEL_DS address limit so vfs accepts a kernel buffer */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Build the dump file name: <path>_<trigger-type>_<sec>.<usec> */
	memset(memdump_path, 0, sizeof(memdump_path));
	memset(memdump_type, 0, sizeof(memdump_type));
	do_gettimeofday(&curtime);
	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
#ifdef CUSTOMER_HW4_DEBUG
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2)
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/data/misc/wifi/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#else
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/installmedia/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
	 * calling BUG_ON immediately after collecting the socram dump.
	 * So the file write operation should directly write the contents into the
	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
	 * instead of appending.
	 */
	file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
#endif /* CUSTOMER_HW4_DEBUG */

	/* print SOCRAM dump file path */
	DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));

	/* open file to write */
	fp = filp_open(memdump_path, file_mode, 0644);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
		goto exit;
	}

	/* Write buf to file; return value intentionally unchecked (see NOTE) */
	fp->f_op->write(fp, buf, size, &pos);

exit:
	/* close file before return; ret != 0 means fp is an ERR_PTR, skip it */
	if (!ret)
		filp_close(fp, current->files);

	/* restore previous address limit */
	set_fs(old_fs);

	/* free buf before return */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_OS_PREFREE(dhd, buf, size);
#else
	MFREE(dhd->osh, buf, size);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	return ret;
}
11657 #endif /* DHD_DEBUG */
11658
/* Arm the pending RX/ctrl wake-lock timeouts (if any) and clear the
 * request counters under wakelock_spinlock.  Returns the larger of the
 * two requested timeout values (ms), or 0 when none was pending.
 */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* Report the larger of the two pending timeout requests */
		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
		if (dhd->wakelock_rx_timeout_enable)
			wake_lock_timeout(&dhd->wl_rxwake,
				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
		if (dhd->wakelock_ctrl_timeout_enable)
			wake_lock_timeout(&dhd->wl_ctrlwake,
				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif
		/* Requests are one-shot: clear them once armed */
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
11683
11684 int net_os_wake_lock_timeout(struct net_device *dev)
11685 {
11686         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11687         int ret = 0;
11688
11689         if (dhd)
11690                 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11691         return ret;
11692 }
11693
11694 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11695 {
11696         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11697         unsigned long flags;
11698
11699         if (dhd) {
11700                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11701                 if (val > dhd->wakelock_rx_timeout_enable)
11702                         dhd->wakelock_rx_timeout_enable = val;
11703                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11704         }
11705         return 0;
11706 }
11707
11708 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11709 {
11710         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11711         unsigned long flags;
11712
11713         if (dhd) {
11714                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11715                 if (val > dhd->wakelock_ctrl_timeout_enable)
11716                         dhd->wakelock_ctrl_timeout_enable = val;
11717                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11718         }
11719         return 0;
11720 }
11721
/* Cancel any pending ctrl wake-lock timeout request and, on wakelock
 * builds, drop the ctrl wake lock if it is currently held.
 * Always returns 0.
 */
int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
		if (wake_lock_active(&dhd->wl_ctrlwake))
			wake_unlock(&dhd->wl_ctrlwake);
#endif
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}
11738
11739 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11740 {
11741         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11742         int ret = 0;
11743
11744         if (dhd)
11745                 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11746         return ret;
11747 }
11748
11749 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11750 {
11751         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11752         int ret = 0;
11753
11754         if (dhd)
11755                 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11756         return ret;
11757 }
11758
11759
11760 #if defined(DHD_TRACE_WAKE_LOCK)
11761 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11762 #include <linux/hashtable.h>
11763 #else
11764 #include <linux/hash.h>
11765 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11766
11767
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table; maps wake-lock call-site
 * addresses to wk_trace_record entries.
 */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table (open-coded for pre-3.7 kernels) */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

/* Runtime switch: non-zero records every wake-lock call site (default on) */
int trace_wklock_onoff = 1;

/* Kind of wake-lock operation recorded for a call site */
typedef enum dhd_wklock_type {
        DHD_WAKE_LOCK,
        DHD_WAKE_UNLOCK,
        DHD_WAIVE_LOCK,
        DHD_RESTORE_LOCK
} dhd_wklock_t;

/* One record per distinct call site, keyed by `addr` into wklock_history */
struct wk_trace_record {
        unsigned long addr;                 /* Address of the instruction */
        dhd_wklock_t lock_type;         /* lock_type */
        unsigned long long counter;             /* counter information */
        struct hlist_node wklock_node;  /* hash node */
};
11791
11792
/* Look up the trace record for a given call-site address, or NULL if the
 * site has not been seen yet.  Called with wakelock_spinlock held (see
 * the STORE_WKLOCK_RECORD call sites, e.g. dhd_os_wake_lock).
 */
static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		/* Buckets can collide; confirm the exact address */
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}
11810
11811 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11812 #define HASH_ADD(hashtable, node, key) \
11813         do { \
11814                 hash_add(hashtable, node, key); \
11815         } while (0);
11816 #else
11817 #define HASH_ADD(hashtable, node, key) \
11818         do { \
11819                 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
11820                 hlist_add_head(node, &hashtable[index]); \
11821         } while (0);
11822 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11823
/* Record one wake-lock operation against the caller's return address.
 * Captures `dhd` from the invoking scope (used under wakelock_spinlock);
 * WAIVE/RESTORE snapshots the current wakelock_counter, other types count
 * occurrences.  The do/while(0) wrapper must NOT carry a trailing
 * semicolon, or the macro expands to two statements and breaks unbraced
 * if/else usage.
 */
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) {\
				printk("Can't allocate wk_trace_record \n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0)
11852
/* Dump every recorded wake-lock call site and its counter to the error
 * log.  Called with wakelock_spinlock held (see dhd_wk_lock_stats_dump).
 */
static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
		{
			switch (wklock_info->lock_type) {
				case DHD_WAKE_LOCK:
					DHD_ERROR(("wakelock lock : %pS  lock_counter : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_WAKE_UNLOCK:
					DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_WAIVE_LOCK:
					DHD_ERROR(("wakelock waive : %pS  before_waive : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_RESTORE_LOCK:
					DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
			}
		}
}
11887
/* Initialize (empty) the wake-lock trace hash table under
 * wakelock_spinlock.
 */
static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	/* Pre-3.7: no hash_init(); reset each bucket head by hand */
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
11904
/* Tear down the wake-lock trace table: unlink and kfree every record,
 * using the *_safe iterators since nodes are deleted while walking.
 * Runs under wakelock_spinlock.
 */
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
		{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
			hash_del(&wklock_info->wklock_node);
#else
			hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
			kfree(wklock_info);
		}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
11934
/* Dump the recorded wake lock/unlock history and the current event
 * wakelock counter to the kernel log. Holds wakelock_spinlock while
 * walking the trace records so they cannot change mid-dump.
 */
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	unsigned long flags;

	DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
}
11946 #else
11947 #define STORE_WKLOCK_RECORD(wklock_type)
11948 #endif /* ! DHD_TRACE_WAKE_LOCK */
11949
/* Acquire (or nest) the main driver wakelock. The OS-level lock is taken
 * only on the 0 -> 1 counter transition and only when wakelocks are not
 * currently waived (see dhd_os_wake_lock_waive). Returns the new nesting
 * count, or 0 when no dhd_info is attached.
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			/* No Android wakelocks: keep the SDIO host awake via runtime PM */
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}
11977
/* Acquire (or nest) the event-processing wakelock (wl_evtwake), guarded by
 * its own wakelock_evt_spinlock. The OS-level lock is taken only on the
 * 0 -> 1 transition. Returns the new nesting count, or 0 when no dhd_info
 * is attached.
 */
int dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
		if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			/* No Android wakelocks: keep the SDIO host awake via runtime PM */
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
		dhd->wakelock_event_counter++;
		ret = dhd->wakelock_event_counter;
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}

	return ret;
}
12000
12001 int net_os_wake_lock(struct net_device *dev)
12002 {
12003         dhd_info_t *dhd = DHD_DEV_INFO(dev);
12004         int ret = 0;
12005
12006         if (dhd)
12007                 ret = dhd_os_wake_lock(&dhd->pub);
12008         return ret;
12009 }
12010
/* Release one nesting level of the main driver wakelock. First arms the
 * rx/ctrl timeout wakelocks via dhd_os_wake_lock_timeout() so pending
 * traffic can still hold off suspend briefly. The OS-level lock is dropped
 * only on the 1 -> 0 transition and only when wakelocks are not waived.
 * Returns the remaining nesting count (0 if already zero or no dhd_info).
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12041
/* Release one nesting level of the event-processing wakelock. The OS-level
 * lock is dropped only on the 1 -> 0 transition. Returns the remaining
 * nesting count (0 if already zero or no dhd_info).
 */
int dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);

		if (dhd->wakelock_event_counter > 0) {
			dhd->wakelock_event_counter--;
			if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_event_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}
	return ret;
}
12066
/* Return 1 if a DHD wakelock that should block suspend is currently held:
 * the main (wl_wifi) or watchdog (wl_wdwake) lock under CONFIG_HAS_WAKELOCK,
 * or a positive wakelock counter with bus runtime-PM enabled on SDIO.
 * Returns 0 otherwise (including when pub is NULL).
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
12089
/* Like dhd_os_check_wakelock() but checks every DHD wakelock (main,
 * watchdog, rx, ctrl, event, and optionally OOB-interrupt and scan locks).
 * Logs which locks are active before returning 1 so suspend refusals can
 * be diagnosed. Returns 0 when nothing blocks suspend or state is missing.
 */
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	int l1, l2, l3, l4, l7;
	int l5 = 0, l6 = 0;	/* stay 0 unless the optional locks are compiled in */
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l7 = wake_lock_active(&dhd->wl_evtwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);

	/* Indicate to the Host to avoid going to suspend if internal locks are up */
	if (dhd && lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* CONFIG_HAS_WAKELOCK */
	return 0;
}
12140
12141 int net_os_wake_unlock(struct net_device *dev)
12142 {
12143         dhd_info_t *dhd = DHD_DEV_INFO(dev);
12144         int ret = 0;
12145
12146         if (dhd)
12147                 ret = dhd_os_wake_unlock(&dhd->pub);
12148         return ret;
12149 }
12150
/* Acquire (or nest) the watchdog wakelock (wl_wdwake). Unlike the main
 * wakelock this one is not subject to waiving. Returns the new nesting
 * count, or 0 when no dhd_info is attached.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12170
/* Release the watchdog wakelock. Note this resets the nesting counter to 0
 * outright (rather than decrementing), so one unlock drops the lock no
 * matter how many times it was taken. Always returns 0.
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12189
12190 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the PCIe OOB host-wake interrupt wakelock (wl_intrwake) for 'val'
 * milliseconds so the host stays awake long enough to service the
 * out-of-band wake interrupt. No-op without CONFIG_HAS_WAKELOCK.
 */
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12202
/* Release the PCIe OOB host-wake interrupt wakelock early, if it is still
 * active (e.g. the timeout from dhd_os_oob_irq_wake_lock_timeout has not
 * expired yet). No-op without CONFIG_HAS_WAKELOCK.
 */
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12217 #endif /* BCMPCIE_OOB_HOST_WAKE */
12218
12219 #ifdef DHD_USE_SCAN_WAKELOCK
/* Hold the scan wakelock (wl_scanwake) for 'val' milliseconds so an
 * in-progress scan can complete before the host suspends. No-op without
 * CONFIG_HAS_WAKELOCK.
 */
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12231
/* Release the scan wakelock early, if it is still active (scan finished
 * before the timeout expired). No-op without CONFIG_HAS_WAKELOCK.
 */
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12246 #endif /* DHD_USE_SCAN_WAKELOCK */
12247
/* Waive wakelocks for operations such as IOVARs in the suspend path; must
 * be closed by a paired call to dhd_os_wake_lock_restore(). While waived,
 * dhd_os_wake_lock()/unlock() only adjust the counter without touching the
 * OS-level lock; restore reconciles the difference afterwards.
 * NOTE(review): despite the original comment saying "returns current
 * wakelock counter", this returns wakelock_wd_counter (the watchdog
 * counter), as does restore — confirm whether that is intentional.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			/* record current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12276
/* Close a dhd_os_wake_lock_waive() section: re-enable normal wakelock
 * behavior and reconcile any lock/unlock activity that happened while
 * waived, so the OS-level lock state matches the counter again.
 * Returns wakelock_wd_counter (see NOTE on dhd_os_wake_lock_waive).
 */
int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
	 * the lock in between, do the same by calling wake_unlock or pm_relax
	 */
#ifdef DHD_TRACE_WAKE_LOCK
	if (trace_wklock_onoff) {
		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
	}
#endif /* DHD_TRACE_WAKE_LOCK */

	/* counter went 0 -> positive while waived: take the OS lock now */
	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	/* counter went positive -> 0 while waived: drop the OS lock now */
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}
12322
/* Initialize all wakelock counters and register the per-purpose Android
 * wakelocks (main/rx/ctrl/event, plus optional OOB-irq and scan locks).
 * The watchdog lock (wl_wdwake) is initialized elsewhere. Also sets up
 * the wakelock trace table when DHD_TRACE_WAKE_LOCK is enabled.
 */
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
12346
/* Mirror of dhd_os_wake_lock_init(): zero the counters, destroy every
 * registered wakelock, and tear down the trace table. Entire body is
 * compiled out without CONFIG_HAS_WAKELOCK.
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	wake_lock_destroy(&dhd->wl_wifi);
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
12370
12371 bool dhd_os_check_if_up(dhd_pub_t *pub)
12372 {
12373         if (!pub)
12374                 return FALSE;
12375         return pub->up;
12376 }
12377
12378 /* function to collect firmware, chip id and chip version info */
12379 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12380 {
12381         int i;
12382
12383         i = snprintf(info_string, sizeof(info_string),
12384                 "  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
12385         printf("%s\n", info_string);
12386
12387         if (!dhdp)
12388                 return;
12389
12390         i = snprintf(&info_string[i], sizeof(info_string) - i,
12391                 "\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12392                 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12393 }
12394
/* Issue a wl ioctl on behalf of a local (in-driver) caller: validate the
 * net_device, resolve its interface index, and run the ioctl under the
 * driver wakelock and perimeter lock. The result is also fed to
 * dhd_check_hang() so repeated failures can trigger hang recovery.
 * Returns the ioctl status, or -EINVAL/-ENODEV for bad arguments.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
12427
/* Check whether ioctl status 'ret' on interface 'ifidx' should count toward
 * hang detection, delegating to dhd_check_hang().
 * NOTE(review): this bool-returning function returns -EINVAL (truthy) when
 * the index does not resolve to a net_device — verify callers treat a
 * nonzero return correctly in that case.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
12440
/* Return the unit (adapter instance) number assigned to this DHD. */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}
12446
12447
12448 #ifdef PROP_TXSTATUS
12449
/* PROP_TXSTATUS platform hook called when proptx flow control starts: on
 * SDIO builds with USE_DYNAMIC_F2_BLKSIZE, switch function-2 to the
 * non-legacy block size. No-op otherwise.
 */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12457
/* PROP_TXSTATUS platform hook called when proptx flow control stops:
 * restore the default function-2 block size (sd_f2_blocksize) on SDIO
 * builds with USE_DYNAMIC_F2_BLKSIZE. No-op otherwise.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12465
/* Decide whether proptx flow control should be skipped.
 * With SKIP_WLFC_ON_CONCURRENT + WL_CFG80211: enable flow control only in
 * concurrent (VSDB) mode, skip it otherwise. Without cfg80211 it is always
 * skipped; without SKIP_WLFC_ON_CONCURRENT it is never skipped.
 */
bool dhd_wlfc_skip_fc(void)
{
#ifdef SKIP_WLFC_ON_CONCURRENT
#ifdef WL_CFG80211

	/* enable flow control in vsdb mode */
	return !(wl_cfg80211_is_concurrent_mode());
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
}
12481 #endif /* PROP_TXSTATUS */
12482
12483 #ifdef BCMDBGFS
12484 #include <linux/debugfs.h>
12485
/* State for the "dhd" debugfs interface: directory handle, the "mem" file
 * exposing raw register space, the owning dhd_pub, and the addressable size.
 */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;
	struct dentry	*debugfs_mem;	/* "mem" register-access node */
	dhd_pub_t	*dhdp;
	uint32		size;		/* bytes of register space exposed */
} dhd_dbgfs_t;

/* Single global instance: one debugfs tree per driver load */
dhd_dbgfs_t g_dbgfs;

/* Bus-layer register accessors (defined in the bus-specific code) */
extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12497
/* debugfs open handler: stash the inode's private pointer for later use by
 * the read/write handlers. Never fails.
 */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
12504
/* debugfs read handler for the "mem" node: fetches one 32-bit register at
 * the current (4-byte aligned) file position and copies it to user space.
 * NOTE(review): only 4 bytes are read and copied regardless of 'count',
 * yet *ppos advances by the full clamped count, and the register address
 * is taken from file->f_pos rather than the passed-in pos. Acceptable for
 * a crude debug aid, but verify before relying on multi-word reads.
 */
static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
                       size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}
12534
12535
/* debugfs write handler for the "mem" node: copies one 32-bit value from
 * user space and writes it to the register at the (4-byte aligned) current
 * file position.
 * NOTE(review): exactly sizeof(uint32) bytes are copied regardless of
 * 'count', and the address comes from file->f_pos, not *ppos — mirror of
 * the quirks in dhd_dbg_state_read. Verify before relying on larger writes.
 */
static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t ret;
	uint32 buf;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
	if (ret == count)
		return -EFAULT;

	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

	return count;
}
12559
12560
12561 loff_t
12562 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
12563 {
12564         loff_t pos = -1;
12565
12566         switch (whence) {
12567                 case 0:
12568                         pos = off;
12569                         break;
12570                 case 1:
12571                         pos = file->f_pos + off;
12572                         break;
12573                 case 2:
12574                         pos = g_dbgfs.size - off;
12575         }
12576         return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
12577 }
12578
/* File operations for the debugfs "mem" node: raw 32-bit register access */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write  = dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek = dhd_debugfs_lseek
};
12585
12586 static void dhd_dbg_create(void)
12587 {
12588         if (g_dbgfs.debugfs_dir) {
12589                 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12590                         NULL, &dhd_dbg_state_ops);
12591         }
12592 }
12593
12594 void dhd_dbg_init(dhd_pub_t *dhdp)
12595 {
12596         g_dbgfs.dhdp = dhdp;
12597         g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12598
12599         g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12600         if (IS_ERR(g_dbgfs.debugfs_dir)) {
12601                 g_dbgfs.debugfs_dir = NULL;
12602                 return;
12603         }
12604
12605         dhd_dbg_create();
12606
12607         return;
12608 }
12609
12610 void dhd_dbg_remove(void)
12611 {
12612         debugfs_remove(g_dbgfs.debugfs_mem);
12613         debugfs_remove(g_dbgfs.debugfs_dir);
12614
12615         bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
12616 }
12617 #endif /* BCMDBGFS */
12618
12619 #ifdef WLMEDIA_HTSF
12620
/* WLMEDIA_HTSF: stamp outgoing packets whose destination port falls in
 * [tsport, tsport+20] with a host timestamp record, so the receive side
 * (dhd_htsf_addrxts) can compute end-to-end latency.
 * NOTE(review): the fixed offsets (40 for dport, 44 checksum, 82/84 for
 * the magic/htsf fields) assume a specific Ethernet/IP/UDP layout used by
 * the iperf-based test setup — confirm before reuse.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/*  timestamp packet  */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*              memcpy(&proto, p1+26, 4);       */
		memcpy(&dport, p1+40, 2);
/*      proto = ((ntoh32(proto))>> 16) & 0xFF;  */
		dport = ntoh16(dport);
	}

	/* timestamp only if  icmp or udb iperf with port 5555 */
/*      if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic  = HTSFMAGIC;
		ts.prio   = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10    = get_cycles();
		ts.t10    = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
12664
12665 static void dhd_dump_htsfhisto(histo_t *his, char *s)
12666 {
12667         int pktcnt = 0, curval = 0, i;
12668         for (i = 0; i < (NUMBIN-2); i++) {
12669                 curval += 500;
12670                 printf("%d ",  his->bin[i]);
12671                 pktcnt += his->bin[i];
12672         }
12673         printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12674                 his->bin[NUMBIN-1], s);
12675 }
12676
/* Classify a delay sample into 500-unit histogram bins.
 * Negative samples count in bin[NUMBIN-1]; bin[NUMBIN-2] tracks the
 * maximum value seen. Samples larger than the final range also land in
 * bin[NUMBIN-3], the last in-range bin.
 */
static
void sorttobin(int value, histo_t *histo)
{
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN-1]++;
		return;
	}
	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
		histo->bin[NUMBIN-2] = value;

	for (i = 0; i < (NUMBIN-2); i++) {
		binval += 500; /* 500m s bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	/* value exceeded every bin boundary: fold into the last in-range bin */
	histo->bin[NUMBIN-3]++;
}
12698
12699 static
12700 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12701 {
12702         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12703         struct sk_buff *skb;
12704         char *p1;
12705         uint16 old_magic;
12706         int d1, d2, d3, end2end;
12707         htsfts_t *htsf_ts;
12708         uint32 htsf;
12709
12710         skb = PKTTONATIVE(dhdp->osh, pktbuf);
12711         p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12712
12713         if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
12714                 memcpy(&old_magic, p1+78, 2);
12715                 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12716         } else {
12717                 return;
12718         }
12719         if (htsf_ts->magic == HTSFMAGIC) {
12720                 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12721                 htsf_ts->cE0 = get_cycles();
12722         }
12723
12724         if (old_magic == 0xACAC) {
12725
12726                 tspktcnt++;
12727                 htsf = dhd_get_htsf(dhd, 0);
12728                 memcpy(skb->data+92, &htsf, sizeof(uint32));
12729
12730                 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12731
12732                 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12733                 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12734                 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12735                 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12736
12737                 sorttobin(d1, &vi_d1);
12738                 sorttobin(d2, &vi_d2);
12739                 sorttobin(d3, &vi_d3);
12740                 sorttobin(end2end, &vi_d4);
12741
12742                 if (end2end > 0 && end2end >  maxdelay) {
12743                         maxdelay = end2end;
12744                         maxdelaypktno = tspktcnt;
12745                         memcpy(&maxdelayts, &ts[tsidx], 16);
12746                 }
12747                 if (++tsidx >= TSMAX)
12748                         tsidx = 0;
12749         }
12750 }
12751
/* Extrapolate the current TSF value: take the last dongle-reported TSF
 * (dhd->htsf.last_tsf), add the host cycles elapsed since it was captured
 * (with 32-bit wraparound handling), scaled by the calibrated coefficient
 * coef.coefdec1coefdec2, plus a fixed bus-delay fudge (HTSF_BUS_DELAY).
 * Returns 0 (with an error log) if the coefficient is not yet calibrated.
 * 'ifidx' is currently unused.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32    factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	/* handle 32-bit cycle-counter wraparound */
	if (cur_cycle >  dhd->htsf.last_cycle) {
		delta = cur_cycle -  dhd->htsf.last_cycle;
	} else {
		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval  = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		/* linear interpolation using the second decimal digit */
		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
12782
/* Print the full timestamp ring (ts[]): per-sample hop deltas and
 * end-to-end latency, the current ring index, and the worst-case sample
 * recorded by dhd_htsf_addrxts().
 */
static void dhd_dump_latency(void)
{
	int i, max = 0;
	int d1, d2, d3, d4, d5;

	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
	for (i = 0; i < TSMAX; i++) {
		d1 = ts[i].t2 - ts[i].t1;
		d2 = ts[i].t3 - ts[i].t2;
		d3 = ts[i].t4 - ts[i].t3;
		d4 = ts[i].t4 - ts[i].t1;
		d5 = ts[max].t4-ts[max].t1;	/* current worst end-to-end */
		if (d4 > d5 && d4 > 0)  {
			max = i;
		}
		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
			d1, d2, d3, d4, i);
	}

	printf("current idx = %d \n", tsidx);

	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
	maxdelayts.t2 - maxdelayts.t1,
	maxdelayts.t3 - maxdelayts.t2,
	maxdelayts.t4 - maxdelayts.t3,
	maxdelayts.t4 - maxdelayts.t1);
}
12813
12814
/*
 * Query the dongle's TSF via the "tsf" iovar and print it next to the
 * host-side estimate from dhd_get_htsf() so the two clocks can be compared.
 *
 * Returns 0 on success, -EOPNOTSUPP when the device rejects the iovar
 * with -EIO, or the negative ioctl error otherwise.
 */
static int
dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
        wl_ioctl_t ioc;
        char buf[32];
        int ret;
        uint32 s1, s2;

        /* Layout of the dongle's "tsf" iovar reply */
        struct tsf {
                uint32 low;
                uint32 high;
        } tsf_buf;

        memset(&ioc, 0, sizeof(ioc));
        memset(&tsf_buf, 0, sizeof(tsf_buf));

        ioc.cmd = WLC_GET_VAR;
        ioc.buf = buf;
        ioc.len = (uint)sizeof(buf);
        ioc.set = FALSE;

        strncpy(buf, "tsf", sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        /* Host-TSF sample before the ioctl round trip.
         * NOTE(review): s1 is never used afterwards — only s2 is reported.
         */
        s1 = dhd_get_htsf(dhd, 0);
        if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
                if (ret == -EIO) {
                        DHD_ERROR(("%s: tsf is not supported by device\n",
                                dhd_ifname(&dhd->pub, ifidx)));
                        return -EOPNOTSUPP;
                }
                return ret;
        }
        /* Host-TSF sample after the round trip, for the comparison print */
        s2 = dhd_get_htsf(dhd, 0);

        memcpy(&tsf_buf, buf, sizeof(tsf_buf));
        printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
                tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
                dhd->htsf.coefdec2, s2-tsf_buf.low);
        printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
        return 0;
}
12856
12857 void htsf_update(dhd_info_t *dhd, void *data)
12858 {
12859         static ulong  cur_cycle = 0, prev_cycle = 0;
12860         uint32 htsf, tsf_delta = 0;
12861         uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
12862         ulong b, a;
12863         cycles_t t;
12864
12865         /* cycles_t in inlcude/mips/timex.h */
12866
12867         t = get_cycles();
12868
12869         prev_cycle = cur_cycle;
12870         cur_cycle = t;
12871
12872         if (cur_cycle > prev_cycle)
12873                 cyc_delta = cur_cycle - prev_cycle;
12874         else {
12875                 b = cur_cycle;
12876                 a = prev_cycle;
12877                 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
12878         }
12879
12880         if (data == NULL)
12881                 printf(" tsf update ata point er is null \n");
12882
12883         memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
12884         memcpy(&cur_tsf, data, sizeof(tsf_t));
12885
12886         if (cur_tsf.low == 0) {
12887                 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
12888                 return;
12889         }
12890
12891         if (cur_tsf.low > prev_tsf.low)
12892                 tsf_delta = (cur_tsf.low - prev_tsf.low);
12893         else {
12894                 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
12895                  cur_tsf.low, prev_tsf.low));
12896                 if (cur_tsf.high > prev_tsf.high) {
12897                         tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
12898                         DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
12899                 } else {
12900                         return; /* do not update */
12901                 }
12902         }
12903
12904         if (tsf_delta)  {
12905                 hfactor = cyc_delta / tsf_delta;
12906                 tmp  =  (cyc_delta - (hfactor * tsf_delta))*10;
12907                 dec1 =  tmp/tsf_delta;
12908                 dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
12909                 tmp  =  (tmp   - (dec1*tsf_delta))*10;
12910                 dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
12911
12912                 if (dec3 > 4) {
12913                         if (dec2 == 9) {
12914                                 dec2 = 0;
12915                                 if (dec1 == 9) {
12916                                         dec1 = 0;
12917                                         hfactor++;
12918                                 } else {
12919                                         dec1++;
12920                                 }
12921                         } else {
12922                                 dec2++;
12923                         }
12924                 }
12925         }
12926
12927         if (hfactor) {
12928                 htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
12929                 dhd->htsf.coef = hfactor;
12930                 dhd->htsf.last_cycle = cur_cycle;
12931                 dhd->htsf.last_tsf = cur_tsf.low;
12932                 dhd->htsf.coefdec1 = dec1;
12933                 dhd->htsf.coefdec2 = dec2;
12934         } else {
12935                 htsf = prev_tsf.low;
12936         }
12937 }
12938
12939 #endif /* WLMEDIA_HTSF */
12940
12941 #ifdef CUSTOM_SET_CPUCORE
/*
 * Pin (set == TRUE) or unpin (set == FALSE) the DHD DPC and RXF kernel
 * threads to their dedicated CPU cores. Pinning is only attempted while
 * the channel is VHT80. Each set_cpus_allowed_ptr() call is retried with a
 * 1 ms sleep until it succeeds or the shared retry budget is exhausted.
 *
 * NOTE(review): retry_set is NOT reset between the DPC and RXF loops, so
 * retries spent on the DPC thread reduce the budget left for RXF — confirm
 * this is intentional.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
        int e_dpc = 0, e_rxf = 0, retry_set = 0;

        if (!(dhd->chan_isvht80)) {
                DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
                return;
        }

        /* Move the DPC thread to DPC_CPUCORE, or back to the primary core */
        if (DPC_CPUCORE) {
                do {
                        if (set == TRUE) {
                                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                                        cpumask_of(DPC_CPUCORE));
                        } else {
                                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                                        cpumask_of(PRIMARY_CPUCORE));
                        }
                        if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                                DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
                                return;
                        }
                        if (e_dpc < 0)
                                OSL_SLEEP(1);
                } while (e_dpc < 0);
        }
        /* Same treatment for the RX frame thread */
        if (RXF_CPUCORE) {
                do {
                        if (set == TRUE) {
                                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                                        cpumask_of(RXF_CPUCORE));
                        } else {
                                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                                        cpumask_of(PRIMARY_CPUCORE));
                        }
                        if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                                DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
                                return;
                        }
                        if (e_rxf < 0)
                                OSL_SLEEP(1);
                } while (e_rxf < 0);
        }
#ifdef DHD_OF_SUPPORT
        interrupt_set_cpucore(set);
#endif /* DHD_OF_SUPPORT */
        DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

        return;
}
12992 #endif /* CUSTOM_SET_CPUCORE */
12993
12994 /* Get interface specific ap_isolate configuration */
12995 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
12996 {
12997         dhd_info_t *dhd = dhdp->info;
12998         dhd_if_t *ifp;
12999
13000         ASSERT(idx < DHD_MAX_IFS);
13001
13002         ifp = dhd->iflist[idx];
13003
13004         return ifp->ap_isolate;
13005 }
13006
13007 /* Set interface specific ap_isolate configuration */
13008 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13009 {
13010         dhd_info_t *dhd = dhdp->info;
13011         dhd_if_t *ifp;
13012
13013         ASSERT(idx < DHD_MAX_IFS);
13014
13015         ifp = dhd->iflist[idx];
13016
13017         ifp->ap_isolate = val;
13018
13019         return 0;
13020 }
13021
13022 #ifdef DHD_FW_COREDUMP
13023
13024
13025 #ifdef CUSTOMER_HW4_DEBUG
13026 #ifdef PLATFORM_SLP
13027 #define MEMDUMPINFO "/opt/etc/.memdump.info"
13028 #else
13029 #define MEMDUMPINFO "/data/.memdump.info"
13030 #endif /* PLATFORM_SLP */
13031 #elif defined(CUSTOMER_HW2)
13032 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
13033 #else
13034 #define MEMDUMPINFO "/installmedia/.memdump.info"
13035 #endif /* CUSTOMER_HW4_DEBUG */
13036
13037 void dhd_get_memdump_info(dhd_pub_t *dhd)
13038 {
13039         struct file *fp = NULL;
13040         uint32 mem_val = DUMP_MEMFILE_MAX;
13041         int ret = 0;
13042         char *filepath = MEMDUMPINFO;
13043
13044         /* Read memdump info from the file */
13045         fp = filp_open(filepath, O_RDONLY, 0);
13046         if (IS_ERR(fp)) {
13047                 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13048                 goto done;
13049         } else {
13050                 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13051                 if (ret < 0) {
13052                         DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13053                         filp_close(fp, NULL);
13054                         goto done;
13055                 }
13056
13057                 mem_val = bcm_atoi((char *)&mem_val);
13058
13059                 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
13060                 filp_close(fp, NULL);
13061         }
13062
13063 done:
13064 #ifdef CUSTOMER_HW4_DEBUG
13065         dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
13066 #else
13067         dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
13068 #endif /* CUSTOMER_HW4_DEBUG */
13069 }
13070
13071
/*
 * Queue a firmware SoC RAM dump buffer for writing to the filesystem on
 * the deferred work queue. In DUMP_MEMONLY mode the kernel is halted
 * immediately instead, leaving the dump in memory for a host-side tool.
 *
 * @buf/@size: dump buffer handed over to dhd_mem_dump(), which frees the
 * wrapper (ownership of the dhd_dump_t transfers to the work item).
 */
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
        dhd_dump_t *dump = NULL;
        dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
        if (dump == NULL) {
                DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
                return;
        }
        dump->buf = buf;
        dump->bufsize = size;

        /* Log the buffer addresses so an external tool can locate the dump */
#if defined(CONFIG_ARM64)
        DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
                (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
        DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
                (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
        /* DUMP_MEMONLY: crash now, dump stays in memory (no file write) */
        if (dhdp->memdump_enabled == DUMP_MEMONLY) {
                BUG_ON(1);
        }

#ifdef DHD_LOG_DUMP
        if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
                dhd_schedule_log_dump(dhdp);
        }
#endif /* DHD_LOG_DUMP */
        dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
                DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
}
/*
 * Deferred-work handler that writes a queued SoC RAM dump to a file and
 * releases the dhd_dump_t wrapper allocated by dhd_schedule_memdump().
 * In DUMP_MEMFILE_BUGON mode the kernel is halted after the write.
 */
static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
        dhd_info_t *dhd = handle;
        dhd_dump_t *dump = event_info;

        if (!dhd) {
                DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
                return;
        }

        if (!dump) {
                DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
                return;
        }

        if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
                DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
        }

        /* BUG on purpose after saving the dump, unless the dump was taken
         * only to feed a sysdump (DUMP_TYPE_BY_SYSDUMP).
         */
        if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
                dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif
                TRUE) {
                BUG_ON(1);
        }
        MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
}
13131 #endif /* DHD_FW_COREDUMP */
13132
13133 #ifdef DHD_LOG_DUMP
13134 static void
13135 dhd_log_dump(void *handle, void *event_info, u8 event)
13136 {
13137         dhd_info_t *dhd = handle;
13138
13139         if (!dhd) {
13140                 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13141                 return;
13142         }
13143
13144         if (do_dhd_log_dump(&dhd->pub)) {
13145                 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13146                 return;
13147         }
13148 }
13149
/* Queue dhd_log_dump() on the deferred work queue at high priority;
 * the handler needs no event payload, so NULL is passed.
 */
void dhd_schedule_log_dump(dhd_pub_t *dhdp)
{
        dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
                (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
                dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
}
13156
/*
 * Write the driver's debug log ring to a timestamped file under
 * DHD_COMMON_DUMP_PATH. A small "common information" header (DHD and
 * firmware versions) is written first, followed by the ring contents,
 * after which the ring is reset under its spinlock. Runs with the address
 * limit raised to KERNEL_DS so the VFS write path accepts kernel buffers.
 *
 * Returns 0 on success, -1 when dhdp is NULL or the file cannot be opened.
 */
static int
do_dhd_log_dump(dhd_pub_t *dhdp)
{
        int ret = 0;
        struct file *fp = NULL;
        mm_segment_t old_fs;
        loff_t pos = 0;
        char dump_path[128];
        char common_info[1024];
        struct timeval curtime;
        uint32 file_mode;
        unsigned long flags = 0;

        if (!dhdp) {
                return -1;
        }

        /* Building the additional information like DHD, F/W version */
        memset(common_info, 0, sizeof(common_info));
        snprintf(common_info, sizeof(common_info),
                "---------- Common information ----------\n"
                "DHD version: %s\n"
                "F/W version: %s\n"
                "----------------------------------------\n",
                dhd_version, fw_version);

        /* change to KERNEL_DS address limit */
        old_fs = get_fs();
        set_fs(KERNEL_DS);

        /* Init file name: <path>/debug_dump_<sec>.<usec> */
        memset(dump_path, 0, sizeof(dump_path));
        do_gettimeofday(&curtime);
        snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
                DHD_COMMON_DUMP_PATH "debug_dump",
                (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
        file_mode = O_CREAT | O_WRONLY | O_SYNC;

        DHD_ERROR(("debug_dump_path = %s\n", dump_path));
        fp = filp_open(dump_path, file_mode, 0644);
        if (IS_ERR(fp)) {
                ret = PTR_ERR(fp);
                DHD_ERROR(("open file error, err = %d\n", ret));
                ret = -1;
                goto exit;
        }

        /* Header first, then the ring: the whole buffer when it has wrapped,
         * otherwise only the bytes written so far.
         * NOTE(review): the f_op->write return values are not checked, so a
         * short or failed write still reports success.
         */
        fp->f_op->write(fp, common_info, strlen(common_info), &pos);
        if (dhdp->dld_buf.wraparound) {
                fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
        } else {
                fp->f_op->write(fp, dhdp->dld_buf.buffer,
                        (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
        }

        /* re-init dhd_log_dump_buf structure */
        spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
        dhdp->dld_buf.wraparound = 0;
        dhdp->dld_buf.present = dhdp->dld_buf.front;
        dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
        bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
        spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
exit:
        /* ret is still 0 only when the open succeeded, so fp is valid here */
        if (!ret) {
                filp_close(fp, NULL);
        }
        set_fs(old_fs);

        return ret;
}
13227 #endif /* DHD_LOG_DUMP */
13228
13229 #ifdef BCMASSERT_LOG
13230 #ifdef CUSTOMER_HW4_DEBUG
13231 #ifdef PLATFORM_SLP
13232 #define ASSERTINFO "/opt/etc/.assert.info"
13233 #else
13234 #define ASSERTINFO "/data/.assert.info"
13235 #endif /* PLATFORM_SLP */
13236 #elif defined(CUSTOMER_HW2)
13237 #define ASSERTINFO "/data/misc/wifi/.assert.info"
13238 #else
13239 #define ASSERTINFO "/installmedia/.assert.info"
13240 #endif /* CUSTOMER_HW4_DEBUG */
13241 void dhd_get_assert_info(dhd_pub_t *dhd)
13242 {
13243         struct file *fp = NULL;
13244         char *filepath = ASSERTINFO;
13245
13246         /*
13247          * Read assert info from the file
13248          * 0: Trigger Kernel crash by panic()
13249          * 1: Print out the logs and don't trigger Kernel panic. (default)
13250          * 2: Trigger Kernel crash by BUG()
13251          * File doesn't exist: Keep default value (1).
13252          */
13253         fp = filp_open(filepath, O_RDONLY, 0);
13254         if (IS_ERR(fp)) {
13255                 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13256         } else {
13257                 int mem_val = 0;
13258                 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13259                 if (ret < 0) {
13260                         DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13261                 } else {
13262                         mem_val = bcm_atoi((char *)&mem_val);
13263                         DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13264                         g_assert_type = mem_val;
13265                 }
13266                 filp_close(fp, NULL);
13267         }
13268 }
13269 #endif /* BCMASSERT_LOG */
13270
13271
13272 #ifdef DHD_WMF
13273 /* Returns interface specific WMF configuration */
13274 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13275 {
13276         dhd_info_t *dhd = dhdp->info;
13277         dhd_if_t *ifp;
13278
13279         ASSERT(idx < DHD_MAX_IFS);
13280
13281         ifp = dhd->iflist[idx];
13282         return &ifp->wmf;
13283 }
13284 #endif /* DHD_WMF */
13285
13286
13287 #if defined(DHD_L2_FILTER)
13288 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13289 {
13290         return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13291 }
13292 #endif 
13293
13294 #ifdef DHD_L2_FILTER
13295 arp_table_t*
13296 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13297 {
13298         dhd_info_t *dhd = dhdp->info;
13299         dhd_if_t *ifp;
13300
13301         ASSERT(bssidx < DHD_MAX_IFS);
13302
13303         ifp = dhd->iflist[bssidx];
13304         return ifp->phnd_arp_table;
13305 }
13306
13307 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13308 {
13309         dhd_info_t *dhd = dhdp->info;
13310         dhd_if_t *ifp;
13311
13312         ASSERT(idx < DHD_MAX_IFS);
13313
13314         ifp = dhd->iflist[idx];
13315
13316         if (ifp)
13317                 return ifp->parp_enable;
13318         else
13319                 return FALSE;
13320 }
13321
13322 /* Set interface specific proxy arp configuration */
13323 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13324 {
13325         dhd_info_t *dhd = dhdp->info;
13326         dhd_if_t *ifp;
13327         ASSERT(idx < DHD_MAX_IFS);
13328         ifp = dhd->iflist[idx];
13329
13330         if (!ifp)
13331             return BCME_ERROR;
13332
13333         /* At present all 3 variables are being
13334          * handled at once
13335          */
13336         ifp->parp_enable = val;
13337         ifp->parp_discard = val;
13338         ifp->parp_allnode = !val;
13339
13340         /* Flush ARP entries when disabled */
13341         if (val == FALSE) {
13342                 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13343                         FALSE, dhdp->tickcnt);
13344         }
13345         return BCME_OK;
13346 }
13347
13348 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13349 {
13350         dhd_info_t *dhd = dhdp->info;
13351         dhd_if_t *ifp;
13352
13353         ASSERT(idx < DHD_MAX_IFS);
13354
13355         ifp = dhd->iflist[idx];
13356
13357         ASSERT(ifp);
13358         return ifp->parp_discard;
13359 }
13360
13361 bool
13362 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13363 {
13364         dhd_info_t *dhd = dhdp->info;
13365         dhd_if_t *ifp;
13366
13367         ASSERT(idx < DHD_MAX_IFS);
13368
13369         ifp = dhd->iflist[idx];
13370
13371         ASSERT(ifp);
13372
13373         return ifp->parp_allnode;
13374 }
13375
13376 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13377 {
13378         dhd_info_t *dhd = dhdp->info;
13379         dhd_if_t *ifp;
13380
13381         ASSERT(idx < DHD_MAX_IFS);
13382
13383         ifp = dhd->iflist[idx];
13384
13385         ASSERT(ifp);
13386
13387         return ifp->dhcp_unicast;
13388 }
13389
13390 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13391 {
13392         dhd_info_t *dhd = dhdp->info;
13393         dhd_if_t *ifp;
13394         ASSERT(idx < DHD_MAX_IFS);
13395         ifp = dhd->iflist[idx];
13396
13397         ASSERT(ifp);
13398
13399         ifp->dhcp_unicast = val;
13400         return BCME_OK;
13401 }
13402
13403 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13404 {
13405         dhd_info_t *dhd = dhdp->info;
13406         dhd_if_t *ifp;
13407
13408         ASSERT(idx < DHD_MAX_IFS);
13409
13410         ifp = dhd->iflist[idx];
13411
13412         ASSERT(ifp);
13413
13414         return ifp->block_ping;
13415 }
13416
13417 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13418 {
13419         dhd_info_t *dhd = dhdp->info;
13420         dhd_if_t *ifp;
13421         ASSERT(idx < DHD_MAX_IFS);
13422         ifp = dhd->iflist[idx];
13423
13424         ASSERT(ifp);
13425
13426         ifp->block_ping = val;
13427
13428         return BCME_OK;
13429 }
13430
13431 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13432 {
13433         dhd_info_t *dhd = dhdp->info;
13434         dhd_if_t *ifp;
13435
13436         ASSERT(idx < DHD_MAX_IFS);
13437
13438         ifp = dhd->iflist[idx];
13439
13440         ASSERT(ifp);
13441
13442         return ifp->grat_arp;
13443 }
13444
13445 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13446 {
13447         dhd_info_t *dhd = dhdp->info;
13448         dhd_if_t *ifp;
13449         ASSERT(idx < DHD_MAX_IFS);
13450         ifp = dhd->iflist[idx];
13451
13452         ASSERT(ifp);
13453
13454         ifp->grat_arp = val;
13455
13456         return BCME_OK;
13457 }
13458 #endif /* DHD_L2_FILTER */
13459
13460
13461 #if defined(SET_RPS_CPUS)
/*
 * Enable or disable Receive Packet Steering for the net_device, using a
 * CPU mask chosen by interface role (BSS / IBSS on the primary interface,
 * P2P on the virtual one).
 *
 * Returns BCME_OK, or -ENODEV / -EINVAL when the interface can't be
 * resolved.
 */
int dhd_rps_cpus_enable(struct net_device *net, int enable)
{
        dhd_info_t *dhd = DHD_DEV_INFO(net);
        dhd_if_t *ifp;
        int ifidx;
        char * RPS_CPU_SETBUF;

        ifidx = dhd_net2idx(dhd, net);
        if (ifidx == DHD_BAD_IF) {
                DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
                return -ENODEV;
        }

        /* Pick the CPU mask string matching the interface's role */
        if (ifidx == PRIMARY_INF) {
                if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
                        DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
                        RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
                } else {
                        DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
                        RPS_CPU_SETBUF = RPS_CPUS_MASK;
                }
        } else if (ifidx == VIRTUAL_INF) {
                DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
                RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
        } else {
                DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
                return -EINVAL;
        }

        /* Apply or clear the mask on the device's first RX queue */
        ifp = dhd->iflist[ifidx];
        if (ifp) {
                if (enable) {
                        DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
                        custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
                } else {
                        custom_rps_map_clear(ifp->net->_rx);
                }
        } else {
                DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
                return -ENODEV;
        }
        return BCME_OK;
}
13505
/*
 * Build an rps_map from a textual CPU bitmap and publish it on the RX
 * queue via RCU, replacing (and RCU-freeing) any previous map. Mirrors
 * the kernel's store_rps_map() sysfs handler.
 *
 * Returns the number of CPUs in the new map, a negative errno on
 * allocation/parse failure, or -1 when the mask contains no CPUs.
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        DHD_INFO(("%s : Entered.\n", __FUNCTION__));

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
                return -ENOMEM;
        }

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
                return err;
        }

        /* At least L1_CACHE_BYTES so the map gets its own cache line */
        map = kzalloc(max_t(unsigned int,
                RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu(cpu, mask) {
                map->cpus[i++] = cpu;
        }

        if (i) {
                map->len = i;
        } else {
                kfree(map);
                map = NULL;
                free_cpumask_var(mask);
                DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
                return -1;
        }

        /* Publish the new map; readers see either the old or new map */
        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        /* NOTE(review): map is always non-NULL here (the empty-mask case
         * returned above), so this check is effectively unconditional.
         */
        if (map) {
                static_key_slow_inc(&rps_needed);
        }
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);

        DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
        return map->len;
}
13569
/*
 * Remove any rps_map published on the RX queue, freeing it after an RCU
 * grace period. Counterpart to custom_rps_map_set().
 *
 * NOTE(review): unlike the set path, this does not take rps_map_lock and
 * does not decrement the rps_needed static key — confirm intentional.
 */
void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
        struct rps_map *map;

        DHD_INFO(("%s : Entered.\n", __FUNCTION__));

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
                DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
        }
}
13583 #endif 
13584
13585
13586
13587 #ifdef DHD_DEBUG_PAGEALLOC
13588
/*
 * Callback invoked by the page-allocation debug layer when a corrupted
 * page owned by this driver is detected: hex-dumps the corrupt region,
 * dumps driver state to the kernel log and (on PCIe builds with coredump
 * support) pulls the dongle RAM dump, which BUGs via DUMP_MEMONLY.
 */
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
        dhd_pub_t *dhdp = (dhd_pub_t *)handle;

        DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
                __FUNCTION__, addr_corrupt, (uint32)len));

        DHD_OS_WAKE_LOCK(dhdp);
        prhex("Page Corruption:", addr_corrupt, len);
        dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
        /* Load the dongle side dump to host memory and then BUG_ON() */
        dhdp->memdump_enabled = DUMP_MEMONLY;
        dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
        dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
        DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
13609 #endif /* DHD_DEBUG_PAGEALLOC */
13610
13611 #ifdef DHD_PKTID_AUDIT_ENABLED
/*
 * Callback invoked when the packet-ID audit detects an inconsistency:
 * dumps driver state to the kernel log and (on PCIe builds with coredump
 * support) captures the dongle RAM dump, BUGging afterwards via
 * DUMP_MEMFILE_BUGON.
 */
void
dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
{
        DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
        DHD_OS_WAKE_LOCK(dhdp);
        dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
        /* Load the dongle side dump to host memory and then BUG_ON() */
        dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
        dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
        dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
        DHD_OS_WAKE_UNLOCK(dhdp);
}
13626 #endif /* DHD_PKTID_AUDIT_ENABLED */
13627
13628 /* ----------------------------------------------------------------------------
13629  * Infrastructure code for sysfs interface support for DHD
13630  *
13631  * What is sysfs interface?
13632  * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13633  *
13634  * Why sysfs interface?
13635  * This is the Linux standard way of changing/configuring Run Time parameters
13636  * for a driver. We can use this interface to control "linux" specific driver
13637  * parameters.
13638  *
13639  * -----------------------------------------------------------------------------
13640  */
13641
13642 #include <linux/sysfs.h>
13643 #include <linux/kobject.h>
13644
13645 #if defined(DHD_TRACE_WAKE_LOCK)
13646
13647 /* Function to show the history buffer */
13648 static ssize_t
13649 show_wklock_trace(struct dhd_info *dev, char *buf)
13650 {
13651         ssize_t ret = 0;
13652         dhd_info_t *dhd = (dhd_info_t *)dev;
13653
13654         buf[ret] = '\n';
13655         buf[ret+1] = 0;
13656
13657         dhd_wk_lock_stats_dump(&dhd->pub);
13658         return ret+1;
13659 }
13660
13661 /* Function to enable/disable wakelock trace */
13662 static ssize_t
13663 wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13664 {
13665         unsigned long onoff;
13666         unsigned long flags;
13667         dhd_info_t *dhd = (dhd_info_t *)dev;
13668
13669         onoff = bcm_strtoul(buf, NULL, 10);
13670         if (onoff != 0 && onoff != 1) {
13671                 return -EINVAL;
13672         }
13673
13674         spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13675         trace_wklock_onoff = onoff;
13676         spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13677         if (trace_wklock_onoff) {
13678                 printk("ENABLE WAKLOCK TRACE\n");
13679         } else {
13680                 printk("DISABLE WAKELOCK TRACE\n");
13681         }
13682
13683         return (ssize_t)(onoff+1);
13684 }
13685 #endif /* DHD_TRACE_WAKE_LOCK */
13686
13687 /*
13688  * Generic Attribute Structure for DHD.
13689  * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13690  * to instantiate an object of type dhd_attr,  populate it with
13691  * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13692  * and add the object to default_attrs[] array, that gets registered
13693  * to the kobject of dhd (named bcm-dhd).
13694  */
13695
struct dhd_attr {
        struct attribute attr;  /* base sysfs attribute (name and mode) */
        ssize_t(*show)(struct dhd_info *, char *);                       /* read handler */
        ssize_t(*store)(struct dhd_info *, const char *, size_t count);  /* write handler */
};
13701
13702 #if defined(DHD_TRACE_WAKE_LOCK)
13703 static struct dhd_attr dhd_attr_wklock =
13704         __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
13705 #endif /* defined(DHD_TRACE_WAKE_LOCK */
13706
/* Attribute object that gets registered with "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif
	NULL	/* sysfs requires a NULL-terminated attribute list */
};
13714
/* Recover the containing dhd_info / dhd_attr from the embedded kobject
 * or attribute handed to the generic sysfs show/store callbacks.
 */
#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
#define to_attr(a) container_of(a, struct dhd_attr, attr)
13717
/*
 * bcm-dhd kobject show function; the "attr" argument specifies for which
 * node under "bcm-dhd" the show function is called.
 */
13722 static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13723 {
13724         dhd_info_t *dhd = to_dhd(kobj);
13725         struct dhd_attr *d_attr = to_attr(attr);
13726         int ret;
13727
13728         if (d_attr->show)
13729                 ret = d_attr->show(dhd, buf);
13730         else
13731                 ret = -EIO;
13732
13733         return ret;
13734 }
13735
13736
/*
 * bcm-dhd kobject store function; the "attr" argument specifies for which
 * node under "bcm-dhd" the store function is called.
 */
13741 static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13742         const char *buf, size_t count)
13743 {
13744         dhd_info_t *dhd = to_dhd(kobj);
13745         struct dhd_attr *d_attr = to_attr(attr);
13746         int ret;
13747
13748         if (d_attr->store)
13749                 ret = d_attr->store(dhd, buf, count);
13750         else
13751                 ret = -EIO;
13752
13753         return ret;
13754
13755 }
13756
/* Dispatch table routing generic sysfs show/store to dhd_attr handlers */
static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};
13761
/* kobject type for the "bcm-dhd" node.  No .release callback is set;
 * NOTE(review): the kobject is embedded in dhd_info, so its storage is
 * presumably freed with dhd_info -- confirm dhd_info always outlives
 * the final kobject_put().
 */
static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};
13766
13767 /* Create a kobject and attach to sysfs interface */
13768 static int dhd_sysfs_init(dhd_info_t *dhd)
13769 {
13770         int ret = -1;
13771
13772         if (dhd == NULL) {
13773                 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13774                 return ret;
13775         }
13776
13777         /* Initialize the kobject */
13778         ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13779         if (ret) {
13780                 kobject_put(&dhd->dhd_kobj);
13781                 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13782                 return ret;
13783         }
13784
13785         /*
13786          * We are always responsible for sending the uevent that the kobject
13787          * was added to the system.
13788          */
13789         kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13790
13791         return ret;
13792 }
13793
13794 /* Done with the kobject and detach the sysfs interface */
13795 static void dhd_sysfs_exit(dhd_info_t *dhd)
13796 {
13797         if (dhd == NULL) {
13798                 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13799                 return;
13800         }
13801
13802         /* Releae the kobject */
13803         kobject_put(&dhd->dhd_kobj);
13804 }
13805
13806 #ifdef DHD_LOG_DUMP
/*
 * Allocate and initialize the debug log dump ring buffer (dhd->dld_buf)
 * and enable log dump capture (dld_enable = 1).
 *
 * The buffer comes from the static preallocation pool when
 * CONFIG_DHD_USE_STATIC_BUF and DHD_USE_STATIC_MEMDUMP are set;
 * otherwise, or as a fallback when that fails, it is kmalloc'ed.
 */
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	spin_lock_init(&dhd->dld_buf.lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
		DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	if (!dhd->dld_buf.buffer) {
		/* first attempt failed; retry with a plain kmalloc */
		dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
		DHD_ERROR(("Try to allocate memory using kmalloc().\n"));

		if (!dhd->dld_buf.buffer) {
			/* NOTE(review): dld_enable is not set on this path,
			 * so dhd_log_dump_print() is expected to stay inert --
			 * confirm dld_enable starts out zeroed
			 */
			DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
			return;
		}
	}

	/* ring-buffer bookkeeping: write cursor starts at the front */
	dhd->dld_buf.wraparound = 0;
	dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_buf.present = dhd->dld_buf.buffer;
	dhd->dld_buf.front = dhd->dld_buf.buffer;
	dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_enable = 1;
}
13835
13836 void
13837 dhd_log_dump_deinit(dhd_pub_t *dhd)
13838 {
13839         dhd->dld_enable = 0;
13840 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13841         DHD_OS_PREFREE(dhd,
13842                 dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13843 #else
13844         kfree(dhd->dld_buf.buffer);
13845 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13846 }
13847
13848 void
13849 dhd_log_dump_print(const char *fmt, ...)
13850 {
13851         int len = 0;
13852         char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
13853         va_list args;
13854         dhd_pub_t *dhd = NULL;
13855         unsigned long flags = 0;
13856
13857         if (wl_get_bcm_cfg80211_ptr()) {
13858                 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
13859         }
13860
13861         if (!dhd || dhd->dld_enable != 1) {
13862                 return;
13863         }
13864
13865         va_start(args, fmt);
13866
13867         len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
13868         if (len < 0) {
13869                 return;
13870         }
13871
13872         /* make a critical section to eliminate race conditions */
13873         spin_lock_irqsave(&dhd->dld_buf.lock, flags);
13874         if (dhd->dld_buf.remain < len) {
13875                 dhd->dld_buf.wraparound = 1;
13876                 dhd->dld_buf.present = dhd->dld_buf.front;
13877                 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13878         }
13879
13880         strncpy(dhd->dld_buf.present, tmp_buf, len);
13881         dhd->dld_buf.remain -= len;
13882         dhd->dld_buf.present += len;
13883         spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
13884
13885         /* double check invalid memory operation */
13886         ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
13887         va_end(args);
13888 }
13889
13890 char*
13891 dhd_log_dump_get_timestamp(void)
13892 {
13893         static char buf[16];
13894         u64 ts_nsec;
13895         unsigned long rem_nsec;
13896
13897         ts_nsec = local_clock();
13898         rem_nsec = do_div(ts_nsec, 1000000000);
13899         snprintf(buf, sizeof(buf), "%5lu.%06lu",
13900                 (unsigned long)ts_nsec, rem_nsec / 1000);
13901
13902         return buf;
13903 }
13904
13905 #endif /* DHD_LOG_DUMP */
13906
13907 /* ---------------------------- End of sysfs implementation ------------------------------------- */
13908
13909 void *dhd_get_pub(struct net_device *dev)
13910 {
13911         dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
13912         if (dhdinfo)
13913                 return (void *)&dhdinfo->pub;
13914         else
13915                 return NULL;
13916 }
13917
13918 bool dhd_os_wd_timer_enabled(void *bus)
13919 {
13920         dhd_pub_t *pub = bus;
13921         dhd_info_t *dhd = (dhd_info_t *)pub->info;
13922
13923         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13924         if (!dhd) {
13925                 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13926                 return FALSE;
13927         }
13928         return dhd->wd_timer_valid;
13929 }