Merge branch 'linux-linaro-lsk-v4.4-android' of git://git.linaro.org/kernel/linux...
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / rockchip_wlan / rkwifi / bcmdhd / dhd_linux.c
1 /*
2  * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3  * Basically selected code segments from usb-cdc.c and usb-rndis.c
4  *
5  * Copyright (C) 1999-2016, Broadcom Corporation
6  * 
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  * 
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  * 
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
29  */
30
31 #include <typedefs.h>
32 #include <linuxver.h>
33 #include <osl.h>
34 #ifdef SHOW_LOGTRACE
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
38
39
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/inetdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/etherdevice.h>
48 #include <linux/random.h>
49 #include <linux/spinlock.h>
50 #include <linux/ethtool.h>
51 #include <linux/fcntl.h>
52 #include <linux/fs.h>
53 #include <linux/ip.h>
54 #include <linux/reboot.h>
55 #include <linux/notifier.h>
56 #include <net/addrconf.h>
57 #ifdef ENABLE_ADAPTIVE_SCHED
58 #include <linux/cpufreq.h>
59 #endif /* ENABLE_ADAPTIVE_SCHED */
60
61 #include <asm/uaccess.h>
62 #include <asm/unaligned.h>
63
64 #include <epivers.h>
65 #include <bcmutils.h>
66 #include <bcmendian.h>
67 #include <bcmdevs.h>
68
69 #include <proto/ethernet.h>
70 #include <proto/bcmevent.h>
71 #include <proto/vlan.h>
72 #include <proto/802.3.h>
73
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
76 #include <dhd.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
80 #endif
81 #include <dhd_bus.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
84 #ifdef WL_ESCAN
85 #include <wl_escan.h>
86 #endif
87 #include <dhd_dbg.h>
88 #ifdef CONFIG_HAS_WAKELOCK
89 #include <linux/wakelock.h>
90 #endif
91 #ifdef WL_CFG80211
92 #include <wl_cfg80211.h>
93 #endif
94 #ifdef PNO_SUPPORT
95 #include <dhd_pno.h>
96 #endif
97 #ifdef RTT_SUPPORT
98 #include <dhd_rtt.h>
99 #endif
100
101 #ifdef CONFIG_COMPAT
102 #include <linux/compat.h>
103 #endif
104
105 #ifdef DHD_WMF
106 #include <dhd_wmf_linux.h>
107 #endif /* DHD_WMF */
108
109 #ifdef DHD_L2_FILTER
110 #include <proto/bcmicmp.h>
111 #include <bcm_l2_filter.h>
112 #include <dhd_l2_filter.h>
113 #endif /* DHD_L2_FILTER */
114
115 #ifdef DHD_PSTA
116 #include <dhd_psta.h>
117 #endif /* DHD_PSTA */
118
119
120 #ifdef DHDTCPACK_SUPPRESS
121 #include <dhd_ip.h>
122 #endif /* DHDTCPACK_SUPPRESS */
123
124 #ifdef DHD_DEBUG_PAGEALLOC
125 typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
126 void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
127 extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
128 #endif /* DHD_DEBUG_PAGEALLOC */
129
130
131 #if defined(DHD_LB)
132 /* Dynamic CPU selection for load balancing */
133 #include <linux/cpu.h>
134 #include <linux/cpumask.h>
135 #include <linux/notifier.h>
136 #include <linux/workqueue.h>
137 #include <asm/atomic.h>
138
139 #if !defined(DHD_LB_PRIMARY_CPUS)
140 #define DHD_LB_PRIMARY_CPUS     0x0 /* Big CPU coreids mask */
141 #endif
142
143 #if !defined(DHD_LB_SECONDARY_CPUS)
144 #define DHD_LB_SECONDARY_CPUS   0xFE /* Little CPU coreids mask */
145 #endif
146
147 #define HIST_BIN_SIZE   8
148
149 #if defined(DHD_LB_RXP)
150 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
151 #endif /* DHD_LB_RXP */
152
153 #endif /* DHD_LB */
154
155 #ifdef WLMEDIA_HTSF
156 #include <linux/time.h>
157 #include <htsf.h>
158
159 #define HTSF_MINLEN 200    /* min. packet length to timestamp */
160 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us  */
161 #define TSMAX  1000        /* max no. of timing record kept   */
162 #define NUMBIN 34
163
164 static uint32 tsidx = 0;
165 static uint32 htsf_seqnum = 0;
166 uint32 tsfsync;
167 struct timeval tsync;
168 static uint32 tsport = 5010;
169
/* Packet-timing histogram: NUMBIN counting buckets (see WLMEDIA_HTSF use). */
typedef struct histo_ {
        uint32 bin[NUMBIN];     /* per-bin occurrence counters */
} histo_t;
173
174 #if !ISPOWEROF2(DHD_SDALIGN)
175 #error DHD_SDALIGN is not a power of 2!
176 #endif
177
178 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
179 #endif /* WLMEDIA_HTSF */
180
181 #ifdef STBLINUX
182 #ifdef quote_str
183 #undef quote_str
184 #endif /* quote_str */
185 #ifdef to_str
186 #undef to_str
187 #endif /* quote_str */
188 #define to_str(s) #s
189 #define quote_str(s) to_str(s)
190
191 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
192 #endif /* STBLINUX */
193
194
195 #if defined(SOFTAP)
196 extern bool ap_cfg_running;
197 extern bool ap_fw_loaded;
198 #endif
199 extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
200
201 #ifdef FIX_CPU_MIN_CLOCK
202 #include <linux/pm_qos.h>
203 #endif /* FIX_CPU_MIN_CLOCK */
204 #ifdef SET_RANDOM_MAC_SOFTAP
205 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
206 #define CONFIG_DHD_SET_RANDOM_MAC_VAL   0x001A11
207 #endif
208 static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
209 #endif /* SET_RANDOM_MAC_SOFTAP */
210 #ifdef ENABLE_ADAPTIVE_SCHED
211 #define DEFAULT_CPUFREQ_THRESH          1000000 /* threshold frequency : 1000000 = 1GHz */
212 #ifndef CUSTOM_CPUFREQ_THRESH
213 #define CUSTOM_CPUFREQ_THRESH   DEFAULT_CPUFREQ_THRESH
214 #endif /* CUSTOM_CPUFREQ_THRESH */
215 #endif /* ENABLE_ADAPTIVE_SCHED */
216
217 /* enable HOSTIP cache update from the host side when an eth0:N is up */
218 #define AOE_IP_ALIAS_SUPPORT 1
219
220 #ifdef BCM_FD_AGGR
221 #include <bcm_rpc.h>
222 #include <bcm_rpc_tp.h>
223 #endif
224 #ifdef PROP_TXSTATUS
225 #include <wlfc_proto.h>
226 #include <dhd_wlfc.h>
227 #endif
228
229 #include <wl_android.h>
230
231 /* Maximum STA per radio */
232 #define DHD_MAX_STA     32
233
234
235
/* Mapping tables between 802.1D priority, dongle fifo index and WME access
 * category. NOTE(review): exact fifo->AC semantics come from the dongle fifo
 * layout -- confirm against firmware docs before changing.
 */
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
/* Map an 802.1D priority (0..7) to its WME access category via the fifo table */
#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
239
240 #ifdef ARP_OFFLOAD_SUPPORT
241 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
242 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
243         unsigned long event, void *ptr);
244 static struct notifier_block dhd_inetaddr_notifier = {
245         .notifier_call = dhd_inetaddr_notifier_call
246 };
247 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
248  * created in kernel notifier link list (with 'next' pointing to itself)
249  */
250 static bool dhd_inetaddr_notifier_registered = FALSE;
251 #endif /* ARP_OFFLOAD_SUPPORT */
252
253 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
254 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
255         unsigned long event, void *ptr);
256 static struct notifier_block dhd_inet6addr_notifier = {
257         .notifier_call = dhd_inet6addr_notifier_call
258 };
259 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
260  * created in kernel notifier link list (with 'next' pointing to itself)
261  */
262 static bool dhd_inet6addr_notifier_registered = FALSE;
263 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
264
265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
266 #include <linux/suspend.h>
267 volatile bool dhd_mmc_suspend = FALSE;
268 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
269 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
270
271 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
272 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
273 #endif 
274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
275 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
276 #endif 
277 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
278 MODULE_LICENSE("GPL and additional rights");
279 #endif /* LinuxVer */
280
281 #include <dhd_bus.h>
282
283 #ifdef BCM_FD_AGGR
284 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
285 #else
286 #ifndef PROP_TXSTATUS
287 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
288 #else
289 #define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
290 #endif
291 #endif /* BCM_FD_AGGR */
292
293 #ifdef PROP_TXSTATUS
294 extern bool dhd_wlfc_skip_fc(void);
295 extern void dhd_wlfc_plat_init(void *dhd);
296 extern void dhd_wlfc_plat_deinit(void *dhd);
297 #endif /* PROP_TXSTATUS */
298 #ifdef USE_DYNAMIC_F2_BLKSIZE
299 extern uint sd_f2_blocksize;
300 extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
301 #endif /* USE_DYNAMIC_F2_BLKSIZE */
302
303 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Stub for print_tainted() on 2.6.15 kernels (guarded by the surrounding
 * LINUX_VERSION_CODE check), where the symbol is not available to modules.
 * Returns an empty string so log formatting that appends the taint string
 * still works.
 *
 * Declared with a proper (void) prototype; the original empty parameter
 * list "()" is an obsolescent non-prototype declaration in C.
 */
const char *
print_tainted(void)
{
        return "";
}
309 #endif  /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
310
311 /* Linux wireless extension support */
312 #if defined(WL_WIRELESS_EXT)
313 #include <wl_iw.h>
314 extern wl_iw_extra_params_t  g_wl_iw_params;
315 #endif /* defined(WL_WIRELESS_EXT) */
316
317 #ifdef CONFIG_PARTIALSUSPEND_SLP
318 #include <linux/partialsuspend_slp.h>
319 #define CONFIG_HAS_EARLYSUSPEND
320 #define DHD_USE_EARLYSUSPEND
321 #define register_early_suspend          register_pre_suspend
322 #define unregister_early_suspend        unregister_pre_suspend
323 #define early_suspend                           pre_suspend
324 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN                50
325 #else
326 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
327 #include <linux/earlysuspend.h>
328 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
329 #endif /* CONFIG_PARTIALSUSPEND_SLP */
330
331 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
332
333 #ifdef PKT_FILTER_SUPPORT
334 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
335 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
336 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
337 #endif
338
339
#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
/* No platform MAC-provisioning support: report success without reading. */
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
/* No platform MAC-persistence support: report success without writing. */
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
350
351
352
353
354
355 #ifdef DHD_FW_COREDUMP
356 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
357 #endif /* DHD_FW_COREDUMP */
358 #ifdef DHD_LOG_DUMP
359 static void dhd_log_dump_init(dhd_pub_t *dhd);
360 static void dhd_log_dump_deinit(dhd_pub_t *dhd);
361 static void dhd_log_dump(void *handle, void *event_info, u8 event);
362 void dhd_schedule_log_dump(dhd_pub_t *dhdp);
363 static int do_dhd_log_dump(dhd_pub_t *dhdp);
364 #endif /* DHD_LOG_DUMP */
365
366 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
367 static struct notifier_block dhd_reboot_notifier = {
368         .notifier_call = dhd_reboot_callback,
369         .priority = 1,
370 };
371
372 #ifdef BCMPCIE
373 static int is_reboot = 0;
374 #endif /* BCMPCIE */
375
/* Record of a dongle interface add/delete event, list-linked for deferred
 * processing (see dhd_ifadd_event_handler / dhd_ifdel_event_handler).
 */
typedef struct dhd_if_event {
        struct list_head        list;                   /* linkage in pending-event list */
        wl_event_data_if_t      event;                  /* interface event payload from dongle */
        char                    name[IFNAMSIZ+1];       /* linux interface name */
        uint8                   mac[ETHER_ADDR_LEN];    /* interface MAC address */
} dhd_if_event_t;
382
383 /* Interface control information */
/* Per-interface control information; one instance per dongle interface,
 * referenced from dhd_info.iflist[].
 */
typedef struct dhd_if {
        struct dhd_info *info;                  /* back pointer to dhd_info */
        /* OS/stack specifics */
        struct net_device *net;                 /* linux net_device for this interface */
        int                             idx;                    /* iface idx in dongle */
        uint                    subunit;                /* subunit */
        uint8                   mac_addr[ETHER_ADDR_LEN];       /* assigned MAC address */
        bool                    set_macaddress;         /* MAC change pending (presumably handled by dhd_set_mac_addr_handler) */
        bool                    set_multicast;          /* mcast list update pending (presumably handled by dhd_set_mcast_list_handler) */
        uint8                   bssidx;                 /* bsscfg index for the interface */
        bool                    attached;               /* Delayed attachment when unset */
        bool                    txflowcontrol;  /* Per interface flow control indicator */
        char                    name[IFNAMSIZ+1]; /* linux interface name */
        char                    dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
        struct net_device_stats stats;          /* per-interface packet/byte counters */
#ifdef DHD_WMF
        dhd_wmf_t               wmf;            /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
        struct list_head sta_list;              /* sll of associated stations */
#if !defined(BCM_GMAC3)
        spinlock_t      sta_list_lock;          /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
        uint32  ap_isolate;                     /* ap-isolation settings */
#ifdef DHD_L2_FILTER
        bool parp_enable;                       /* proxy-ARP enable flag */
        bool parp_discard;
        bool parp_allnode;
        arp_table_t *phnd_arp_table;            /* proxy-ARP table handle */
/* for Per BSS modification */
        bool dhcp_unicast;
        bool block_ping;
        bool grat_arp;
#endif /* DHD_L2_FILTER */
} dhd_if_t;
420
#ifdef WLMEDIA_HTSF
/* 64-bit TSF value split into 32-bit halves. */
typedef struct {
        uint32 low;
        uint32 high;
} tsf_t;

/* Host timestamp state; presumably used to track host-time vs dongle-TSF
 * drift -- confirm against htsf.h.
 */
typedef struct {
        uint32 last_cycle;
        uint32 last_sec;
        uint32 last_tsf;
        uint32 coef;     /* scaling factor */
        uint32 coefdec1; /* first decimal  */
        uint32 coefdec2; /* second decimal */
} htsf_t;

/* Four timestamps captured for one packet's traversal. */
typedef struct {
        uint32 t1;
        uint32 t2;
        uint32 t3;
        uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];      /* ring of per-packet timing records */
static tstamp_t maxdelayts;     /* timestamps of the worst delay observed */
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif  /* WLMEDIA_HTSF */
448
/* Payload passed to the deferred IPv6 address-notifier work handler
 * (see dhd_inet6_work_handler).
 */
struct ipv6_work_info_t {
        uint8                   if_idx;         /* dongle interface index */
        char                    ipv6_addr[16];  /* raw 128-bit IPv6 address */
        unsigned long           event;          /* notifier event code -- presumably NETDEV_UP/DOWN; confirm at caller */
};
454
#ifdef DHD_DEBUG
/* Memory-dump descriptor: a buffer pointer plus its size in bytes. */
typedef struct dhd_dump {
        uint8 *buf;             /* dump buffer */
        int bufsize;            /* size of buf in bytes */
} dhd_dump_t;
#endif /* DHD_DEBUG */
461
/* When Perimeter locks are deployed, any blocking calls must be preceded
463  * with a PERIM UNLOCK and followed by a PERIM LOCK.
464  * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
465  * wait_event_timeout().
466  */
467
468 /* Local private structure (extension of pub) */
/* Linux-private driver state: extends the OS-independent dhd_pub_t with
 * locks, threads, timers, wakelocks and load-balancing bookkeeping.
 * The embedded pub member is the handle exchanged with common-layer code.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
        wl_iw_t         iw;             /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
        dhd_pub_t pub;
        dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */

        void *adapter;                  /* adapter information, interrupt, fw path etc. */
        char fw_path[PATH_MAX];         /* path to firmware image */
        char nv_path[PATH_MAX];         /* path to nvram vars file */
        char conf_path[PATH_MAX];       /* path to config vars file */

        /* serialize dhd iovars */
        struct mutex dhd_iovar_mutex;

        struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
        spinlock_t      wlfc_spinlock;

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
        htsf_t  htsf;
#endif
        wait_queue_head_t ioctl_resp_wait;
        wait_queue_head_t d3ack_wait;
        wait_queue_head_t dhd_bus_busy_state_wait;
        uint32  default_wd_interval;    /* watchdog interval in ms */

        struct timer_list timer;        /* watchdog timer */
        bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
        struct timer_list rpm_timer;
        bool rpm_timer_valid;
        tsk_ctl_t         thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
        struct tasklet_struct tasklet;
        spinlock_t      sdlock;
        spinlock_t      txqlock;
        spinlock_t      dhd_lock;

        struct semaphore sdsem;
        tsk_ctl_t       thr_dpc_ctl;    /* DPC thread control */
        tsk_ctl_t       thr_wdt_ctl;    /* watchdog thread control */

        tsk_ctl_t       thr_rxf_ctl;    /* RX frame thread control */
        spinlock_t      rxf_lock;
        bool            rxthread_enabled;

        /* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
        struct wake_lock wl_wifi;   /* Wifi wakelock */
        struct wake_lock wl_rxwake; /* Wifi rx wakelock */
        struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
        struct wake_lock wl_wdwake; /* Wifi wd wakelock */
        struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
        struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
        struct wake_lock wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
        /* net_device interface lock, prevent race conditions among net_dev interface
         * calls and wifi_on or wifi_off
         */
        struct mutex dhd_net_if_mutex;
        struct mutex dhd_suspend_mutex;
#endif
        spinlock_t wakelock_spinlock;           /* protects the wakelock counters below */
        spinlock_t wakelock_evt_spinlock;
        uint32 wakelock_event_counter;
        uint32 wakelock_counter;
        int wakelock_wd_counter;
        int wakelock_rx_timeout_enable;
        int wakelock_ctrl_timeout_enable;
        bool waive_wakelock;
        uint32 wakelock_before_waive;

        /* Thread to issue ioctl for multicast */
        wait_queue_head_t ctrl_wait;
        atomic_t pend_8021x_cnt;                /* count of in-flight 802.1x frames */
        dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
        dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
        struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
        u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
        void *rpc_th;
        void *rpc_osh;
        struct timer_list rpcth_timer;
        bool rpcth_timer_active;
        uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
        spinlock_t      tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
        bool cpufreq_fix_status;
        struct mutex cpufreq_fix;
        struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
        struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
        void                    *dhd_deferred_wq;       /* deferred-work queue (dhd_linux_wq) */
#ifdef DEBUG_CPU_FREQ
        struct notifier_block freq_trans;
        int __percpu *new_freq;
#endif
        unsigned int unit;                      /* driver instance number */
        struct notifier_block pm_notifier;
#ifdef DHD_PSTA
        uint32  psta_mode;      /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
        dhd_dump_t *dump;
        struct timer_list join_timer;
        u32 join_timeout_val;
        bool join_timer_active;
        uint scan_time_count;
        struct timer_list scan_timer;
        bool scan_timer_active;
#endif
#if defined(DHD_LB)
        /* CPU Load Balance dynamic CPU selection */

        /* Variable that tracks the current CPUs available for candidacy */
        cpumask_var_t cpumask_curr_avail;

        /* Primary and secondary CPU mask */
        cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
        cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

        struct notifier_block cpu_notifier;

        /* Tasklet to handle Tx Completion packet freeing */
        struct tasklet_struct tx_compl_tasklet;
        atomic_t        tx_compl_cpu;


        /* Tasklet to handle RxBuf Post during Rx completion */
        struct tasklet_struct rx_compl_tasklet;
        atomic_t        rx_compl_cpu;

        /* Napi struct for handling rx packet sendup. Packets are removed from
         * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
         * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
         * to run to rx_napi_cpu.
         */
        struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
        struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
        struct napi_struct    rx_napi_struct ____cacheline_aligned;
        atomic_t        rx_napi_cpu; /* cpu on which the napi is dispatched */
        struct net_device    *rx_napi_netdev; /* netdev of primary interface */

        struct work_struct    rx_napi_dispatcher_work;
        struct work_struct    tx_compl_dispatcher_work;
        struct work_struct    rx_compl_dispatcher_work;
        /* Number of times DPC Tasklet ran */
        uint32  dhd_dpc_cnt;

        /* Number of times NAPI processing got scheduled */
        uint32  napi_sched_cnt;

        /* Number of times NAPI processing ran on each available core */
        uint32  napi_percpu_run_cnt[NR_CPUS];

        /* Number of times RX Completions got scheduled */
        uint32  rxc_sched_cnt;
        /* Number of times RX Completion ran on each available core */
        uint32  rxc_percpu_run_cnt[NR_CPUS];

        /* Number of times TX Completions got scheduled */
        uint32  txc_sched_cnt;
        /* Number of times TX Completions ran on each available core */
        uint32  txc_percpu_run_cnt[NR_CPUS];

        /* CPU status */
        /* Number of times each CPU came online */
        uint32  cpu_online_cnt[NR_CPUS];

        /* Number of times each CPU went offline */
        uint32  cpu_offline_cnt[NR_CPUS];

        /*
         * Consumer Histogram - NAPI RX Packet processing
         * -----------------------------------------------
         * On Each CPU, when the NAPI RX Packet processing call back was invoked
         * how many packets were processed is captured in this data structure.
         * Now its difficult to capture the "exact" number of packets processed.
         * So considering the packet counter to be a 32 bit one, we have a
         * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
         * processed is rounded off to the next power of 2 and put in the
         * appropriate "bin" the value in the bin gets incremented.
         * For example, assume that in CPU 1 if NAPI Rx runs 3 times
         * and the packet count processed is as follows (assume the bin counters are 0)
         * iteration 1 - 10 (the bin counter 2^4 increments to 1)
         * iteration 2 - 30 (the bin counter 2^5 increments to 1)
         * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
         */
        uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
        uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
        uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

        struct kobject dhd_kobj;        /* sysfs kobject for this instance */
#ifdef SUPPORT_SENSORHUB
        uint32 shub_enable;
#endif /* SUPPORT_SENSORHUB */

        struct delayed_work dhd_memdump_work;
} dhd_info_t;
693
694 #define DHDIF_FWDER(dhdif)      FALSE
695
696 /* Flag to indicate if we should download firmware on driver load */
697 uint dhd_download_fw_on_driverload = TRUE;
698
699 /* Flag to indicate if driver is initialized */
700 uint dhd_driver_init_done = FALSE;
701
702 /* Definitions to provide path to the firmware and nvram
703  * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
704  */
705 char firmware_path[MOD_PARAM_PATHLEN];
706 char nvram_path[MOD_PARAM_PATHLEN];
707 char config_path[MOD_PARAM_PATHLEN];
708
709 /* backup buffer for firmware and nvram path */
710 char fw_bak_path[MOD_PARAM_PATHLEN];
711 char nv_bak_path[MOD_PARAM_PATHLEN];
712
/* information string to keep firmware, chip, chip revision info visible in the log */
714 char info_string[MOD_PARAM_INFOLEN];
715 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
716 int op_mode = 0;
717 int disable_proptx = 0;
718 module_param(op_mode, int, 0644);
719
720 #if defined(DHD_LB_RXP)
721 static int dhd_napi_weight = 32;
722 module_param(dhd_napi_weight, int, 0644);
723 #endif /* DHD_LB_RXP */
724
725 extern int wl_control_wl_start(struct net_device *dev);
726 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
727 struct semaphore dhd_registration_sem;
728 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
729
730 /* deferred handlers */
731 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
732 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
733 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
734 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
735 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
736 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
737 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
738 #ifdef WL_CFG80211
739 extern void dhd_netdev_free(struct net_device *ndev);
740 #endif /* WL_CFG80211 */
741
742 /* Error bits */
743 module_param(dhd_msg_level, int, 0);
744 #if defined(WL_WIRELESS_EXT)
745 module_param(iw_msg_level, int, 0);
746 #endif
747 #ifdef WL_CFG80211
748 module_param(wl_dbg_level, int, 0);
749 #endif
750 module_param(android_msg_level, int, 0);
751 module_param(config_msg_level, int, 0);
752
753 #ifdef ARP_OFFLOAD_SUPPORT
754 /* ARP offload enable */
755 uint dhd_arp_enable = TRUE;
756 module_param(dhd_arp_enable, uint, 0);
757
758 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
759
760 #ifdef ENABLE_ARP_SNOOP_MODE
761 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
762 #else
763 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
764 #endif  /* ENABLE_ARP_SNOOP_MODE */
765
766 module_param(dhd_arp_mode, uint, 0);
767 #endif /* ARP_OFFLOAD_SUPPORT */
768
769 /* Disable Prop tx */
770 module_param(disable_proptx, int, 0644);
771 /* load firmware and/or nvram values from the filesystem */
772 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
773 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
774 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
775
776 /* Watchdog interval */
777
778 /* extend watchdog expiration to 2 seconds when DPC is running */
779 #define WATCHDOG_EXTEND_INTERVAL (2000)
780
781 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
782 module_param(dhd_watchdog_ms, uint, 0);
783
784 #ifdef DHD_PCIE_RUNTIMEPM
785 uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
786 #endif /* DHD_PCIE_RUNTIMEPMT */
787 #if defined(DHD_DEBUG)
788 /* Console poll interval */
789 uint dhd_console_ms = 0;
790 module_param(dhd_console_ms, uint, 0644);
791 #endif /* defined(DHD_DEBUG) */
792
793
794 uint dhd_slpauto = TRUE;
795 module_param(dhd_slpauto, uint, 0);
796
797 #ifdef PKT_FILTER_SUPPORT
798 /* Global Pkt filter enable control */
799 uint dhd_pkt_filter_enable = TRUE;
800 module_param(dhd_pkt_filter_enable, uint, 0);
801 #endif
802
803 /* Pkt filter init setup */
804 uint dhd_pkt_filter_init = 0;
805 module_param(dhd_pkt_filter_init, uint, 0);
806
807 /* Pkt filter mode control */
808 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
809 uint dhd_master_mode = FALSE;
810 #else
811 uint dhd_master_mode = FALSE;
812 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
813 module_param(dhd_master_mode, uint, 0);
814
815 int dhd_watchdog_prio = 0;
816 module_param(dhd_watchdog_prio, int, 0);
817
818 /* DPC thread priority */
819 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
820 module_param(dhd_dpc_prio, int, 0);
821
822 /* RX frame thread priority */
823 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
824 module_param(dhd_rxf_prio, int, 0);
825
826 int passive_channel_skip = 0;
827 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
828
829 #if !defined(BCMDHDUSB)
830 extern int dhd_dongle_ramsize;
831 module_param(dhd_dongle_ramsize, int, 0);
832 #endif /* BCMDHDUSB */
833
834 /* Keep track of number of instances */
835 static int dhd_found = 0;
836 static int instance_base = 0; /* Starting instance number */
837 module_param(instance_base, int, 0644);
838
839 /* Functions to manage sysfs interface for dhd */
840 static int dhd_sysfs_init(dhd_info_t *dhd);
841 static void dhd_sysfs_exit(dhd_info_t *dhd);
842
843 #if defined(DHD_LB)
844
static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
        /* Default CPU allocation for the jobs: NAPI rx processing on CPU1,
         * rx/tx completion work together on CPU2. These are starting values;
         * the CPU candidacy algorithm revises them on CPU hotplug events.
         */
        atomic_set(&dhd->rx_napi_cpu, 1);
        atomic_set(&dhd->rx_compl_cpu, 2);
        atomic_set(&dhd->tx_compl_cpu, 2);
}
853
854 static void
855 dhd_cpumasks_deinit(dhd_info_t *dhd)
856 {
857         free_cpumask_var(dhd->cpumask_curr_avail);
858         free_cpumask_var(dhd->cpumask_primary);
859         free_cpumask_var(dhd->cpumask_primary_new);
860         free_cpumask_var(dhd->cpumask_secondary);
861         free_cpumask_var(dhd->cpumask_secondary_new);
862 }
863
864 static int
865 dhd_cpumasks_init(dhd_info_t *dhd)
866 {
867         int id;
868         uint32 cpus;
869         int ret = 0;
870
871         if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
872                 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
873                 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
874                 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
875                 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
876                 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
877                 ret = -ENOMEM;
878                 goto fail;
879         }
880
881         cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
882         cpumask_clear(dhd->cpumask_primary);
883         cpumask_clear(dhd->cpumask_secondary);
884
885         cpus = DHD_LB_PRIMARY_CPUS;
886         for (id = 0; id < NR_CPUS; id++) {
887                 if (isset(&cpus, id))
888                         cpumask_set_cpu(id, dhd->cpumask_primary);
889         }
890
891         cpus = DHD_LB_SECONDARY_CPUS;
892         for (id = 0; id < NR_CPUS; id++) {
893                 if (isset(&cpus, id))
894                         cpumask_set_cpu(id, dhd->cpumask_secondary);
895         }
896
897         return ret;
898 fail:
899         dhd_cpumasks_deinit(dhd);
900         return ret;
901 }
902
903 /*
904  * The CPU Candidacy Algorithm
905  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
906  * The available CPUs for selection are divided into two groups
907  *  Primary Set - A CPU mask that carries the First Choice CPUs
908  *  Secondary Set - A CPU mask that carries the Second Choice CPUs.
909  *
910  * There are two types of Job, that needs to be assigned to
911  * the CPUs, from one of the above mentioned CPU group. The Jobs are
912  * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, RX) - compl_cpu
914  *
915  * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
916  * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
917  * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
918  * If there are more processors free, it assigns one to compl_cpu.
919  * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
920  * CPU, as much as possible.
921  *
922  * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
923  * would allow Tx completion skb's to be released into a local free pool from
924  * which the rx buffer posts could have been serviced. it is important to note
925  * that a Tx packet may not have a large enough buffer for rx posting.
926  */
/* Implements the CPU candidacy algorithm described above: pick the CPUs
 * that will run the NAPI rx job and the (tx+rx) completion job, preferring
 * the primary mask, then the secondary mask, and falling back to CPU0.
 * Called at init time and from the CPU hotplug notifier.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 0;
		compl_cpu = 0;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));
	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);

	/* Publish the selection; the dispatch paths read these atomics */
	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	return;
}
1002
1003 /*
1004  * Function to handle CPU Hotplug notifications.
1005  * One of the task it does is to trigger the CPU Candidacy algorithm
1006  * for load balancing.
1007  */
/* CPU hotplug notifier callback: keeps cpumask_curr_avail in sync with the
 * set of online CPUs, bumps the per-CPU online/offline stats counters, and
 * re-runs the candidacy algorithm on every change. Always returns NOTIFY_OK
 * (the driver never vetoes a hotplug transition).
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* Recover the owning dhd_info from the embedded notifier block */
	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);

	switch (action)
	{
		case CPU_ONLINE:
			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;

		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;
		default:
			break;
	}

	return NOTIFY_OK;
}
1035
1036 #if defined(DHD_LB_STATS)
1037 void dhd_lb_stats_init(dhd_pub_t *dhdp)
1038 {
1039         dhd_info_t *dhd;
1040         int i, j;
1041
1042         if (dhdp == NULL) {
1043                 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1044                         __FUNCTION__));
1045                 return;
1046         }
1047
1048         dhd = dhdp->info;
1049         if (dhd == NULL) {
1050                 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1051                 return;
1052         }
1053
1054         DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1055         DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1056         DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1057         DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1058
1059         for (i = 0; i < NR_CPUS; i++) {
1060                 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1061                 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1062                 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1063
1064                 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1065                 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1066         }
1067
1068         for (i = 0; i < NR_CPUS; i++) {
1069                 for (j = 0; j < HIST_BIN_SIZE; j++) {
1070                         DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1071                         DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1072                         DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1073                 }
1074         }
1075
1076         return;
1077 }
1078
1079 static void dhd_lb_stats_dump_histo(
1080         struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1081 {
1082         int i, j;
1083         uint32 per_cpu_total[NR_CPUS] = {0};
1084         uint32 total = 0;
1085
1086         bcm_bprintf(strbuf, "CPU: \t\t");
1087         for (i = 0; i < num_possible_cpus(); i++)
1088                 bcm_bprintf(strbuf, "%d\t", i);
1089         bcm_bprintf(strbuf, "\nBin\n");
1090
1091         for (i = 0; i < HIST_BIN_SIZE; i++) {
1092                 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1093                 for (j = 0; j < num_possible_cpus(); j++) {
1094                         bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1095                 }
1096                 bcm_bprintf(strbuf, "\n");
1097         }
1098         bcm_bprintf(strbuf, "Per CPU Total \t");
1099         total = 0;
1100         for (i = 0; i < num_possible_cpus(); i++) {
1101                 for (j = 0; j < HIST_BIN_SIZE; j++) {
1102                         per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1103                 }
1104                 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1105                 total += per_cpu_total[i];
1106         }
1107         bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1108
1109         return;
1110 }
1111
1112 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1113 {
1114         int i;
1115
1116         bcm_bprintf(strbuf, "CPU: \t");
1117         for (i = 0; i < num_possible_cpus(); i++)
1118                 bcm_bprintf(strbuf, "%d\t", i);
1119         bcm_bprintf(strbuf, "\n");
1120
1121         bcm_bprintf(strbuf, "Val: \t");
1122         for (i = 0; i < num_possible_cpus(); i++)
1123                 bcm_bprintf(strbuf, "%u\t", *(p+i));
1124         bcm_bprintf(strbuf, "\n");
1125         return;
1126 }
1127
/* Dump all load-balance statistics (hotplug counts, scheduling counts and
 * per-feature per-CPU histograms) into strbuf. NULL arguments are logged
 * and ignored. Which sections appear depends on the DHD_LB_RXP/RXC/TXC
 * compile-time options.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	/* CPU hotplug event counters */
	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	/* How often each job type was scheduled */
	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}
1175
1176 static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1177 {
1178         uint32 bin_power;
1179         uint32 *p = NULL;
1180
1181         bin_power = next_larger_power2(count);
1182
1183         switch (bin_power) {
1184                 case   0: break;
1185                 case   1: /* Fall through intentionally */
1186                 case   2: p = bin + 0; break;
1187                 case   4: p = bin + 1; break;
1188                 case   8: p = bin + 2; break;
1189                 case  16: p = bin + 3; break;
1190                 case  32: p = bin + 4; break;
1191                 case  64: p = bin + 5; break;
1192                 case 128: p = bin + 6; break;
1193                 default : p = bin + 7; break;
1194         }
1195         if (p)
1196                 *p = *p + 1;
1197         return;
1198 }
1199
1200 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1201 {
1202         int cpu;
1203         dhd_info_t *dhd = dhdp->info;
1204
1205         cpu = get_cpu();
1206         put_cpu();
1207         dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1208
1209         return;
1210 }
1211
1212 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1213 {
1214         int cpu;
1215         dhd_info_t *dhd = dhdp->info;
1216
1217         cpu = get_cpu();
1218         put_cpu();
1219         dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1220
1221         return;
1222 }
1223
1224 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1225 {
1226         int cpu;
1227         dhd_info_t *dhd = dhdp->info;
1228
1229         cpu = get_cpu();
1230         put_cpu();
1231         dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1232
1233         return;
1234 }
1235
/* Count one tx-completion run on the current CPU (load-balance stats). */
extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}
1241
/* Count one rx-completion run on the current CPU (load-balance stats). */
extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
1247
1248 #endif /* DHD_LB_STATS */
1249 #endif /* DHD_LB */
1250
1251
1252 #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
1253 int g_frameburst = 1;
1254 #endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
1255
1256 static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
1257
/* DHD Perimeter lock only used in router with bypass forwarding. */
1259 #define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
1260 #define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
1261 #define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)
1262
1263 #ifdef PCIE_FULL_DONGLE
1264 #if defined(BCM_GMAC3)
1265 #define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
1266 #define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
1267 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })
1268
1269 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1270 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
1271 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
1272 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1273
1274 #else /* ! BCM_GMAC3 */
1275 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1276 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1277         spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1278 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1279         spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1280
1281 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1282 static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
1283         struct list_head *snapshot_list);
1284 static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
1285 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1286 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1287 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1288
1289 #endif /* ! BCM_GMAC3 */
1290 #endif /* PCIE_FULL_DONGLE */
1291
1292 /* Control fw roaming */
1293 uint dhd_roam_disable = 0;
1294
1295 #ifdef BCMDBGFS
1296 extern int dhd_dbg_init(dhd_pub_t *dhdp);
1297 extern void dhd_dbg_remove(void);
1298 #endif
1299
1300 /* Control radio state */
1301 uint dhd_radio_up = 1;
1302
/* Network interface name */
1304 char iface_name[IFNAMSIZ] = {'\0'};
1305 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
1306
1307 /* The following are specific to the SDIO dongle */
1308
1309 /* IOCTL response timeout */
1310 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
1311
1312 /* Idle timeout for backplane clock */
1313 int dhd_idletime = DHD_IDLETIME_TICKS;
1314 module_param(dhd_idletime, int, 0);
1315
1316 /* Use polling */
1317 uint dhd_poll = FALSE;
1318 module_param(dhd_poll, uint, 0);
1319
1320 /* Use interrupts */
1321 uint dhd_intr = TRUE;
1322 module_param(dhd_intr, uint, 0);
1323
1324 /* SDIO Drive Strength (in milliamps) */
1325 uint dhd_sdiod_drive_strength = 6;
1326 module_param(dhd_sdiod_drive_strength, uint, 0);
1327
1328 #ifdef BCMSDIO
1329 /* Tx/Rx bounds */
1330 extern uint dhd_txbound;
1331 extern uint dhd_rxbound;
1332 module_param(dhd_txbound, uint, 0);
1333 module_param(dhd_rxbound, uint, 0);
1334
1335 /* Deferred transmits */
1336 extern uint dhd_deferred_tx;
1337 module_param(dhd_deferred_tx, uint, 0);
1338
1339 #endif /* BCMSDIO */
1340
1341
1342 #ifdef SDTEST
1343 /* Echo packet generator (pkts/s) */
1344 uint dhd_pktgen = 0;
1345 module_param(dhd_pktgen, uint, 0);
1346
1347 /* Echo packet len (0 => sawtooth, max 2040) */
1348 uint dhd_pktgen_len = 0;
1349 module_param(dhd_pktgen_len, uint, 0);
1350 #endif /* SDTEST */
1351
1352
1353
1354 /* Allow delayed firmware download for debug purpose */
1355 int allow_delay_fwdl = FALSE;
1356 module_param(allow_delay_fwdl, int, 0);
1357
1358 extern char dhd_version[];
1359 extern char fw_version[];
1360
1361 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
1362 static void dhd_net_if_lock_local(dhd_info_t *dhd);
1363 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
1364 static void dhd_suspend_lock(dhd_pub_t *dhdp);
1365 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
1366
1367 #ifdef WLMEDIA_HTSF
1368 void htsf_update(dhd_info_t *dhd, void *data);
1369 tsf_t prev_tsf, cur_tsf;
1370
1371 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
1372 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
1373 static void dhd_dump_latency(void);
1374 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
1375 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
1376 static void dhd_dump_htsfhisto(histo_t *his, char *s);
1377 #endif /* WLMEDIA_HTSF */
1378
1379 /* Monitor interface */
1380 int dhd_monitor_init(void *dhd_pub);
1381 int dhd_monitor_uninit(void);
1382
1383
1384 #if defined(WL_WIRELESS_EXT)
1385 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
1386 #endif /* defined(WL_WIRELESS_EXT) */
1387
1388 static void dhd_dpc(ulong data);
1389 /* forward decl */
1390 extern int dhd_wait_pend8021x(struct net_device *dev);
1391 void dhd_os_wd_timer_extend(void *bus, bool extend);
1392
1393 #ifdef TOE
1394 #ifndef BDC
1395 #error TOE requires BDC
1396 #endif /* !BDC */
1397 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
1398 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
1399 #endif /* TOE */
1400
1401 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
1402                              wl_event_msg_t *event_ptr, void **data_ptr);
1403
1404 #if defined(CONFIG_PM_SLEEP)
/* System PM notifier: tracks suspend/resume transitions.
 * On suspend-prepare, wlfc (proprietary tx flow control) is suspended (when
 * built with SUPPORT_P2P_GO_PS + PROP_TXSTATUS) and, on 2.6.27..2.6.39
 * kernels, the dhd_mmc_suspend flag is raised for the SDIO glue.
 * Always returns NOTIFY_DONE.
 */
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

	BCM_REFERENCE(dhdinfo);

	/* Any action other than those below leaves suspend == FALSE */
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
	if (suspend) {
		/* Waive the wake lock so the wlfc suspend path cannot
		 * keep the host awake while it runs.
		 */
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else
		dhd_wlfc_resume(&dhdinfo->pub);
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	/* Publish the suspend state to the SDIO bus code */
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}
1444
1445 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1446  * created in kernel notifier link list (with 'next' pointing to itself)
1447  */
1448 static bool dhd_pm_notifier_registered = FALSE;
1449
1450 extern int register_pm_notifier(struct notifier_block *nb);
1451 extern int unregister_pm_notifier(struct notifier_block *nb);
1452 #endif /* CONFIG_PM_SLEEP */
1453
1454 /* Request scheduling of the bus rx frame */
1455 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
1456 static void dhd_os_rxflock(dhd_pub_t *pub);
1457 static void dhd_os_rxfunlock(dhd_pub_t *pub);
1458
1459 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1460 typedef struct dhd_dev_priv {
1461         dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
1462         dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
1463         int          ifidx; /* interface index */
1464 } dhd_dev_priv_t;
1465
1466 #define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
1467 #define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
1468 #define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1469 #define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1470 #define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1471
1472 /** Clear the dhd net_device's private structure. */
1473 static inline void
1474 dhd_dev_priv_clear(struct net_device * dev)
1475 {
1476         dhd_dev_priv_t * dev_priv;
1477         ASSERT(dev != (struct net_device *)NULL);
1478         dev_priv = DHD_DEV_PRIV(dev);
1479         dev_priv->dhd = (dhd_info_t *)NULL;
1480         dev_priv->ifp = (dhd_if_t *)NULL;
1481         dev_priv->ifidx = DHD_BAD_IF;
1482 }
1483
1484 /** Setup the dhd net_device's private structure. */
1485 static inline void
1486 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1487                   int ifidx)
1488 {
1489         dhd_dev_priv_t * dev_priv;
1490         ASSERT(dev != (struct net_device *)NULL);
1491         dev_priv = DHD_DEV_PRIV(dev);
1492         dev_priv->dhd = dhd;
1493         dev_priv->ifp = ifp;
1494         dev_priv->ifidx = ifidx;
1495 }
1496
1497 #ifdef PCIE_FULL_DONGLE
1498
1499 /** Dummy objects are defined with state representing bad|down.
1500  * Performance gains from reducing branch conditionals, instruction parallelism,
1501  * dual issue, reducing load shadows, avail of larger pipelines.
1502  * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1503  * is accessed via the dhd_sta_t.
1504  */
1505
1506 /* Dummy dhd_info object */
1507 dhd_info_t dhd_info_null = {
1508 #if defined(BCM_GMAC3)
1509         .fwdh = FWDER_NULL,
1510 #endif
1511         .pub = {
1512                  .info = &dhd_info_null,
1513 #ifdef DHDTCPACK_SUPPRESS
1514                  .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1515 #endif /* DHDTCPACK_SUPPRESS */
1516                  .up = FALSE,
1517                  .busstate = DHD_BUS_DOWN
1518         }
1519 };
1520 #define DHD_INFO_NULL (&dhd_info_null)
1521 #define DHD_PUB_NULL  (&dhd_info_null.pub)
1522
1523 /* Dummy netdevice object */
1524 struct net_device dhd_net_dev_null = {
1525         .reg_state = NETREG_UNREGISTERED
1526 };
1527 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1528
1529 /* Dummy dhd_if object */
1530 dhd_if_t dhd_if_null = {
1531 #if defined(BCM_GMAC3)
1532         .fwdh = FWDER_NULL,
1533 #endif
1534 #ifdef WMF
1535         .wmf = { .wmf_enable = TRUE },
1536 #endif
1537         .info = DHD_INFO_NULL,
1538         .net = DHD_NET_DEV_NULL,
1539         .idx = DHD_BAD_IF
1540 };
1541 #define DHD_IF_NULL  (&dhd_if_null)
1542
1543 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
1544
1545 /** Interface STA list management. */
1546
1547 /** Fetch the dhd_if object, given the interface index in the dhd. */
1548 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1549
1550 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1551 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1552 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1553
1554 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1555 static void dhd_if_del_sta_list(dhd_if_t * ifp);
1556 static void     dhd_if_flush_sta(dhd_if_t * ifp);
1557
1558 /* Construct/Destruct a sta pool. */
1559 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1560 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1561 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1562 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1563
1564
1565 /* Return interface pointer */
1566 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1567 {
1568         ASSERT(ifidx < DHD_MAX_IFS);
1569
1570         if (ifidx >= DHD_MAX_IFS)
1571                 return NULL;
1572
1573         return dhdp->info->iflist[ifidx];
1574 }
1575
/** Reset a dhd_sta object and free into the dhd pool.
 * Drains every packet still queued on the sta's flow queues, releases the
 * station id back to the id16 allocator, and scrubs the entry back to the
 * free state (idx == ID16_INVALID) that dhd_sta_alloc() asserts on.
 */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			/* Mark the ring so other paths know a sta teardown owns it */
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			/* Drop every packet still pending on this flow queue */
			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	/* Return the station id and park the entry in its free state */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
1631
1632 /** Allocate a dhd_sta object from the dhd pool. */
1633 static dhd_sta_t *
1634 dhd_sta_alloc(dhd_pub_t * dhdp)
1635 {
1636         uint16 idx;
1637         dhd_sta_t * sta;
1638         dhd_sta_pool_t * sta_pool;
1639
1640         ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1641
1642         idx = id16_map_alloc(dhdp->staid_allocator);
1643         if (idx == ID16_INVALID) {
1644                 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1645                 return DHD_STA_NULL;
1646         }
1647
1648         sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1649         sta = &sta_pool[idx];
1650
1651         ASSERT((sta->idx == ID16_INVALID) &&
1652                (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1653
1654         DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1655
1656         sta->idx = idx; /* implying allocated */
1657
1658         return sta;
1659 }
1660
/** Delete all STAs in an interface's STA list.
 * Each entry is unlinked under the sta_list lock and returned to the sta
 * pool via dhd_sta_free(), which also drains its queued packets. On GMAC3
 * builds the sta is first deassociated from the WOFA forwarder.
 */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
		if (ifp->fwdh) {
			/* Remove sta from WOFA forwarder. */
			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
		}
#endif /* BCM_GMAC3 */
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}
1685
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database.
 * Compiles to a no-op unless built with BCM_GMAC3; also a no-op when the
 * interface is NULL or has no forwarder handle. Unlike dhd_if_del_sta_list()
 * this does not unlink or free the sta objects themselves.
 */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)

	if (ifp && (ifp->fwdh != FWDER_NULL)) {
		dhd_sta_t *sta, *next;
		unsigned long flags;

		DHD_IF_STA_LIST_LOCK(ifp, flags);

		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
			/* Remove any sta entry from WOFA forwarder. */
			fwder_flush(ifp->fwdh, (wofa_t)sta);
		}

		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	}
#endif /* BCM_GMAC3 */
}
1707
/** Construct a pool of dhd_sta_t objects to be used by interfaces.
 * Creates an id16 allocator (station id 0 is reserved) plus a zeroed array
 * of max_sta + 1 entries, then routes every entry through dhd_sta_free()
 * so the pool starts fully populated with "free" stas.
 * Returns BCME_OK, or BCME_ERROR on allocation failure (nothing leaked).
 */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	/* First drain the id allocator so each entry gets a valid idx
	 * (dhd_sta_free() below asserts on and then releases it).
	 */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}
1757
1758 /** Destruct the pool of dhd_sta_t objects.
1759  * Caller must ensure that no STA objects are currently associated with an if.
1760  */
1761 static void
1762 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1763 {
1764         dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1765
1766         if (sta_pool) {
1767                 int idx;
1768                 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1769                 for (idx = 1; idx <= max_sta; idx++) {
1770                         ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1771                         ASSERT(sta_pool[idx].idx == ID16_INVALID);
1772                 }
1773                 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1774                 dhdp->sta_pool = NULL;
1775         }
1776
1777         id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1778         dhdp->staid_allocator = NULL;
1779 }
1780
1781 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1782 static void
1783 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1784 {
1785         int idx, prio, sta_pool_memsz;
1786         dhd_sta_t * sta;
1787         dhd_sta_pool_t * sta_pool;
1788         void *staid_allocator;
1789
1790         if (!dhdp) {
1791                 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1792                 return;
1793         }
1794
1795         sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1796         staid_allocator = dhdp->staid_allocator;
1797
1798         if (!sta_pool) {
1799                 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1800                 return;
1801         }
1802
1803         if (!staid_allocator) {
1804                 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1805                 return;
1806         }
1807
1808         /* clear free pool */
1809         sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1810         bzero((uchar *)sta_pool, sta_pool_memsz);
1811
1812         /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1813         id16_map_clear(staid_allocator, max_sta, 1);
1814
1815         /* Initialize all sta(s) for the pre-allocated free pool. */
1816         for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1817                 sta = &sta_pool[idx];
1818                 sta->idx = id16_map_alloc(staid_allocator);
1819                 ASSERT(sta->idx <= max_sta);
1820         }
1821         /* Now place them into the pre-allocated free pool. */
1822         for (idx = 1; idx <= max_sta; idx++) {
1823                 sta = &sta_pool[idx];
1824                 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1825                         sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1826                 }
1827                 dhd_sta_free(dhdp, sta);
1828         }
1829 }
1830
1831 /** Find STA with MAC address ea in an interface's STA list. */
1832 dhd_sta_t *
1833 dhd_find_sta(void *pub, int ifidx, void *ea)
1834 {
1835         dhd_sta_t *sta;
1836         dhd_if_t *ifp;
1837         unsigned long flags;
1838
1839         ASSERT(ea != NULL);
1840         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1841         if (ifp == NULL)
1842                 return DHD_STA_NULL;
1843
1844         DHD_IF_STA_LIST_LOCK(ifp, flags);
1845
1846         list_for_each_entry(sta, &ifp->sta_list, list) {
1847                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1848                         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1849                         return sta;
1850                 }
1851         }
1852
1853         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1854
1855         return DHD_STA_NULL;
1856 }
1857
1858 /** Add STA into the interface's STA list. */
1859 dhd_sta_t *
1860 dhd_add_sta(void *pub, int ifidx, void *ea)
1861 {
1862         dhd_sta_t *sta;
1863         dhd_if_t *ifp;
1864         unsigned long flags;
1865
1866         ASSERT(ea != NULL);
1867         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1868         if (ifp == NULL)
1869                 return DHD_STA_NULL;
1870
1871         sta = dhd_sta_alloc((dhd_pub_t *)pub);
1872         if (sta == DHD_STA_NULL) {
1873                 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1874                 return DHD_STA_NULL;
1875         }
1876
1877         memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1878
1879         /* link the sta and the dhd interface */
1880         sta->ifp = ifp;
1881         sta->ifidx = ifidx;
1882         INIT_LIST_HEAD(&sta->list);
1883
1884         DHD_IF_STA_LIST_LOCK(ifp, flags);
1885
1886         list_add_tail(&sta->list, &ifp->sta_list);
1887
1888 #if defined(BCM_GMAC3)
1889         if (ifp->fwdh) {
1890                 ASSERT(ISALIGNED(ea, 2));
1891                 /* Add sta to WOFA forwarder. */
1892                 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1893         }
1894 #endif /* BCM_GMAC3 */
1895
1896         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1897
1898         return sta;
1899 }
1900
1901 /** Delete STA from the interface's STA list. */
1902 void
1903 dhd_del_sta(void *pub, int ifidx, void *ea)
1904 {
1905         dhd_sta_t *sta, *next;
1906         dhd_if_t *ifp;
1907         unsigned long flags;
1908
1909         ASSERT(ea != NULL);
1910         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1911         if (ifp == NULL)
1912                 return;
1913
1914         DHD_IF_STA_LIST_LOCK(ifp, flags);
1915
1916         list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1917                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1918 #if defined(BCM_GMAC3)
1919                         if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1920                                 ASSERT(ISALIGNED(ea, 2));
1921                                 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1922                         }
1923 #endif /* BCM_GMAC3 */
1924                         list_del(&sta->list);
1925                         dhd_sta_free(&ifp->info->pub, sta);
1926                 }
1927         }
1928
1929         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1930 #ifdef DHD_L2_FILTER
1931         if (ifp->parp_enable) {
1932                 /* clear Proxy ARP cache of specific Ethernet Address */
1933                 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1934                         ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1935         }
1936 #endif /* DHD_L2_FILTER */
1937         return;
1938 }
1939
1940 /** Add STA if it doesn't exist. Not reentrant. */
1941 dhd_sta_t*
1942 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1943 {
1944         dhd_sta_t *sta;
1945
1946         sta = dhd_find_sta(pub, ifidx, ea);
1947
1948         if (!sta) {
1949                 /* Add entry */
1950                 sta = dhd_add_sta(pub, ifidx, ea);
1951         }
1952
1953         return sta;
1954 }
1955
1956 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1957 #if !defined(BCM_GMAC3)
1958 static struct list_head *
1959 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1960 {
1961         unsigned long flags;
1962         dhd_sta_t *sta, *snapshot;
1963
1964         INIT_LIST_HEAD(snapshot_list);
1965
1966         DHD_IF_STA_LIST_LOCK(ifp, flags);
1967
1968         list_for_each_entry(sta, &ifp->sta_list, list) {
1969                 /* allocate one and add to snapshot */
1970                 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1971                 if (snapshot == NULL) {
1972                         DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1973                         continue;
1974                 }
1975
1976                 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1977
1978                 INIT_LIST_HEAD(&snapshot->list);
1979                 list_add_tail(&snapshot->list, snapshot_list);
1980         }
1981
1982         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1983
1984         return snapshot_list;
1985 }
1986
1987 static void
1988 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1989 {
1990         dhd_sta_t *sta, *next;
1991
1992         list_for_each_entry_safe(sta, next, snapshot_list, list) {
1993                 list_del(&sta->list);
1994                 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1995         }
1996 }
1997 #endif /* !BCM_GMAC3 */
1998 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1999
#else
/* PCIE_FULL_DONGLE not compiled in: per-interface STA tracking is not
 * needed, so the pool/list helpers collapse to no-ops and the lookup/add
 * entry points report "no STA".
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
2009
2010
2011 #if defined(DHD_LB)
2012
2013 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2014 /**
2015  * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2016  * CPU and schedules a tasklet.
2017  * @tasklet: opaque pointer to the tasklet
2018  */
2019 static INLINE void
2020 dhd_tasklet_schedule(void *tasklet)
2021 {
2022         tasklet_schedule((struct tasklet_struct *)tasklet);
2023 }
2024
2025 /**
2026  * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2027  * @tasklet: tasklet to be scheduled
2028  * @on_cpu: cpu core id
2029  *
2030  * If the requested cpu is online, then an IPI is sent to this cpu via the
2031  * smp_call_function_single with no wait and the tasklet_schedule function
2032  * will be invoked to schedule the specified tasklet on the requested CPU.
2033  */
2034 static void
2035 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2036 {
2037         const int wait = 0;
2038         smp_call_function_single(on_cpu,
2039                 dhd_tasklet_schedule, (void *)tasklet, wait);
2040 }
2041 #endif /* DHD_LB_TXC || DHD_LB_RXC */
2042
2043
2044 #if defined(DHD_LB_TXC)
2045 /**
2046  * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2047  * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2048  * freeing the packets placed in the tx_compl workq
2049  */
2050 void
2051 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2052 {
2053         dhd_info_t *dhd = dhdp->info;
2054         int curr_cpu, on_cpu;
2055
2056         if (dhd->rx_napi_netdev == NULL) {
2057                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2058                 return;
2059         }
2060
2061         DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2062         /*
2063          * If the destination CPU is NOT online or is same as current CPU
2064          * no need to schedule the work
2065          */
2066         curr_cpu = get_cpu();
2067         put_cpu();
2068
2069         on_cpu = atomic_read(&dhd->tx_compl_cpu);
2070
2071         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2072                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2073         } else {
2074                 schedule_work(&dhd->tx_compl_dispatcher_work);
2075         }
2076 }
2077
2078 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2079 {
2080         struct dhd_info *dhd =
2081                 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2082         int cpu;
2083
2084         get_online_cpus();
2085         cpu = atomic_read(&dhd->tx_compl_cpu);
2086         if (!cpu_online(cpu))
2087                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2088         else
2089                 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2090         put_online_cpus();
2091 }
2092
2093 #endif /* DHD_LB_TXC */
2094
2095
2096 #if defined(DHD_LB_RXC)
2097 /**
2098  * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2099  * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2100  * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2101  * placed in the rx_compl workq.
2102  *
2103  * @dhdp: pointer to dhd_pub object
2104  */
2105 void
2106 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2107 {
2108         dhd_info_t *dhd = dhdp->info;
2109         int curr_cpu, on_cpu;
2110
2111         if (dhd->rx_napi_netdev == NULL) {
2112                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2113                 return;
2114         }
2115
2116         DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2117         /*
2118          * If the destination CPU is NOT online or is same as current CPU
2119          * no need to schedule the work
2120          */
2121         curr_cpu = get_cpu();
2122         put_cpu();
2123
2124         on_cpu = atomic_read(&dhd->rx_compl_cpu);
2125
2126         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2127                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2128         } else {
2129                 schedule_work(&dhd->rx_compl_dispatcher_work);
2130         }
2131 }
2132
2133 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2134 {
2135         struct dhd_info *dhd =
2136                 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2137         int cpu;
2138
2139         get_online_cpus();
2140         cpu = atomic_read(&dhd->tx_compl_cpu);
2141         if (!cpu_online(cpu))
2142                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2143         else
2144                 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2145         put_online_cpus();
2146 }
2147
2148 #endif /* DHD_LB_RXC */
2149
2150
2151 #if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;	/* dhd_rx_frame() is fed one skb per call */
	const int chan = 0;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));

	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* NOTE(review): the entire snapshot is drained regardless of budget;
	 * confirm this is intentional, as it can process more than `budget`
	 * packets in one poll.
	 */
	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		/* Interface index was stashed in the pkttag at enqueue time
		 * (see dhd_lb_rx_pkt_enqueue).
		 */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	/* NOTE(review): napi_complete() is called unconditionally and the
	 * function always returns budget - 1, i.e. it never reports that the
	 * full budget was consumed — verify against the NAPI contract that
	 * returning == budget keeps the poll scheduled.
	 */
	napi_complete(napi);

	return budget - 1;
}
2206
2207 /**
2208  * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2209  * poll list. This function may be invoked via the smp_call_function_single
2210  * from a remote CPU.
2211  *
2212  * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2213  * after the napi_struct is added to the softnet data's poll_list
2214  *
2215  * @info: pointer to a dhd_info struct
2216  */
2217 static void
2218 dhd_napi_schedule(void *info)
2219 {
2220         dhd_info_t *dhd = (dhd_info_t *)info;
2221
2222         DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2223                 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2224
2225         /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2226         if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2227                 __napi_schedule(&dhd->rx_napi_struct);
2228                 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2229         }
2230
2231         /*
2232          * If the rx_napi_struct was already running, then we let it complete
2233          * processing all its packets. The rx_napi_struct may only run on one
2234          * core at a time, to avoid out-of-order handling.
2235          */
2236 }
2237
2238 /**
2239  * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2240  * action after placing the dhd's rx_process napi object in the the remote CPU's
2241  * softnet data's poll_list.
2242  *
2243  * @dhd: dhd_info which has the rx_process napi object
2244  * @on_cpu: desired remote CPU id
2245  */
2246 static INLINE int
2247 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2248 {
2249         int wait = 0; /* asynchronous IPI */
2250
2251         DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2252                 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2253
2254         if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2255                 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2256                         __FUNCTION__, on_cpu));
2257         }
2258
2259         DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2260
2261         return 0;
2262 }
2263
2264 /*
2265  * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2266  * Why should we do this?
2267  * The candidacy algorithm is run from the call back function
2268  * registered to CPU hotplug notifier. This call back happens from Worker
2269  * context. The dhd_napi_schedule_on is also from worker context.
2270  * Note that both of this can run on two different CPUs at the same time.
2271  * So we can possibly have a window where a given CPUn is being brought
2272  * down from CPUm while we try to run a function on CPUn.
 * To prevent this, it is better to have the whole code execute the SMP
 * function under get_online_cpus.
2275  * This function call ensures that hotplug mechanism does not kick-in
2276  * until we are done dealing with online CPUs
2277  * If the hotplug worker is already running, no worries because the
2278  * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2279  *
2280  * The below mentioned code structure is proposed in
2281  * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2282  * for the question
2283  * Q: I need to ensure that a particular cpu is not removed when there is some
2284  *    work specific to this cpu is in progress
2285  *
2286  * According to the documentation calling get_online_cpus is NOT required, if
2287  * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2288  * run from Work Queue context we have to call these functions
2289  */
2290 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2291 {
2292         struct dhd_info *dhd =
2293                 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2294         int cpu;
2295
2296         get_online_cpus();
2297         cpu = atomic_read(&dhd->rx_napi_cpu);
2298         if (!cpu_online(cpu))
2299                 dhd_napi_schedule(dhd);
2300         else
2301                 dhd_napi_schedule_on(dhd, cpu);
2302         put_online_cpus();
2303 }
2304
2305 /**
2306  * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2307  * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2308  * the packets enqueued into the rx_napi_queue and sendup.
2309  * The producer's rx packet queue is appended to the rx_napi_queue before
2310  * dispatching the rx_napi_struct.
2311  */
2312 void
2313 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2314 {
2315         unsigned long flags;
2316         dhd_info_t *dhd = dhdp->info;
2317         int curr_cpu;
2318         int on_cpu;
2319
2320         if (dhd->rx_napi_netdev == NULL) {
2321                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2322                 return;
2323         }
2324
2325         DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2326                 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2327
2328         /* append the producer's queue of packets to the napi's rx process queue */
2329         spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2330         skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2331         spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2332
2333         /*
2334          * If the destination CPU is NOT online or is same as current CPU
2335          * no need to schedule the work
2336          */
2337         curr_cpu = get_cpu();
2338         put_cpu();
2339
2340         on_cpu = atomic_read(&dhd->rx_napi_cpu);
2341
2342         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2343                 dhd_napi_schedule(dhd);
2344         } else {
2345                 schedule_work(&dhd->rx_napi_dispatcher_work);
2346         }
2347 }
2348
2349 /**
2350  * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2351  */
2352 void
2353 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2354 {
2355         dhd_info_t *dhd = dhdp->info;
2356
2357         DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2358                 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2359         DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2360         __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2361 }
2362 #endif /* DHD_LB_RXP */
2363
2364 #endif /* DHD_LB */
2365
2366 static void dhd_memdump_work_handler(struct work_struct * work)
2367 {
2368         struct dhd_info *dhd =
2369                 container_of(work, struct dhd_info, dhd_memdump_work.work);
2370
2371         BCM_REFERENCE(dhd);
2372 #ifdef BCMPCIE
2373         dhd_prot_collect_memdump(&dhd->pub);
2374 #endif
2375 }
2376
2377
2378 /** Returns dhd iflist index corresponding the the bssidx provided by apps */
2379 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2380 {
2381         dhd_if_t *ifp;
2382         dhd_info_t *dhd = dhdp->info;
2383         int i;
2384
2385         ASSERT(bssidx < DHD_MAX_IFS);
2386         ASSERT(dhdp);
2387
2388         for (i = 0; i < DHD_MAX_IFS; i++) {
2389                 ifp = dhd->iflist[i];
2390                 if (ifp && (ifp->bssidx == bssidx)) {
2391                         DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2392                                 ifp->name, bssidx, i));
2393                         break;
2394                 }
2395         }
2396         return i;
2397 }
2398
/* Push an skb into the circular rx-frame buffer consumed by the rxf thread.
 * Returns BCME_OK on success; BCME_ERROR for a NULL skb or (without
 * RXF_DEQUEUE_ON_BUSY) an occupied slot; BCME_BUSY for an occupied slot
 * when RXF_DEQUEUE_ON_BUSY is compiled in.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	/* A non-NULL slot at store_idx means the ring is full: the consumer
	 * has not yet drained the oldest entry.
	 */
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* Store, then advance store_idx modulo MAXSKBPEND (power of two). */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
2439
2440 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2441 {
2442         uint32 store_idx;
2443         uint32 sent_idx;
2444         void *skb;
2445
2446         dhd_os_rxflock(dhdp);
2447
2448         store_idx = dhdp->store_idx;
2449         sent_idx = dhdp->sent_idx;
2450         skb = dhdp->skbbuf[sent_idx];
2451
2452         if (skb == NULL) {
2453                 dhd_os_rxfunlock(dhdp);
2454                 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2455                         store_idx, sent_idx));
2456                 return NULL;
2457         }
2458
2459         dhdp->skbbuf[sent_idx] = NULL;
2460         dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2461
2462         DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2463                 skb, sent_idx));
2464
2465         dhd_os_rxfunlock(dhdp);
2466
2467         return skb;
2468 }
2469
2470 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2471 {
2472         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2473
2474         if (prepost) { /* pre process */
2475                 dhd_read_macaddr(dhd);
2476         } else { /* post process */
2477                 dhd_write_macaddr(&dhd->pub.mac);
2478         }
2479
2480         return 0;
2481 }
2482
2483 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2484 static bool
2485 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2486 {
2487         bool _apply = FALSE;
2488         /* In case of IBSS mode, apply arp pkt filter */
2489         if (op_mode & DHD_FLAG_IBSS_MODE) {
2490                 _apply = TRUE;
2491                 goto exit;
2492         }
2493         /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2494         if ((dhd->arp_version == 1) &&
2495                 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2496                 _apply = TRUE;
2497                 goto exit;
2498         }
2499
2500 exit:
2501         return _apply;
2502 }
2503 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2504
2505 void dhd_set_packet_filter(dhd_pub_t *dhd)
2506 {
2507 #ifdef PKT_FILTER_SUPPORT
2508         int i;
2509
2510         DHD_TRACE(("%s: enter\n", __FUNCTION__));
2511         if (dhd_pkt_filter_enable) {
2512                 for (i = 0; i < dhd->pktfilter_count; i++) {
2513                         dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2514                 }
2515         }
2516 #endif /* PKT_FILTER_SUPPORT */
2517 }
2518
2519 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2520 {
2521 #ifdef PKT_FILTER_SUPPORT
2522         int i;
2523
2524         DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2525
2526         if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2527                 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2528                 return;
2529         }
2530         /* 1 - Enable packet filter, only allow unicast packet to send up */
2531         /* 0 - Disable packet filter */
2532         if (dhd_pkt_filter_enable && (!value ||
2533             (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2534         {
2535                 for (i = 0; i < dhd->pktfilter_count; i++) {
2536 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2537                         if (value && (i == DHD_ARP_FILTER_NUM) &&
2538                                 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2539                                 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2540                                         "val %d, cnt %d, op_mode 0x%x\n",
2541                                         value, i, dhd->op_mode));
2542                                 continue;
2543                         }
2544 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2545                         dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2546                                 value, dhd_master_mode);
2547                 }
2548         }
2549 #endif /* PKT_FILTER_SUPPORT */
2550 }
2551
/*
 * Apply (value != 0) or revert (value == 0) the driver's suspend-time
 * power tuning on the dongle.  Called with the host's suspend state via
 * dhd_suspend_resume_helper(); only acts when the interface is up and,
 * for the suspend direction, when dhd->in_suspend is already set.
 *
 * Suspend direction: raises PM mode, enables the packet-filter white
 * list, disables allmulti, stretches the beacon listen interval
 * (bcn_li_dtim), and applies several optional firmware tweaks (beacon
 * timeout, roam threshold, roam off, RA filter, interrupt width) gated
 * by build-time options.  Resume direction undoes each setting in the
 * same order.
 *
 * Returns 0, or -ENODEV if dhd is NULL.  Individual iovar failures are
 * logged but do not abort the sequence (best effort by design).
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
        int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
#ifdef SUPPORT_SENSORHUB
        uint32 shub_msreq;
#endif /* SUPPORT_SENSORHUB */
        /* wl_pkt_filter_enable_t       enable_parm; */
        char iovbuf[32];
        int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
        int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
        int roam_time_thresh = 0;       /* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
        uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
        int bcn_li_bcn;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
        uint nd_ra_filter = 0;
        int ret = 0;
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
        struct dhd_info *dhdinfo;
        uint32 allmulti;
        uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
        int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

        if (!dhd)
                return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
        dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

        DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
                __FUNCTION__, value, dhd->in_suspend));

        /* Serialize against concurrent suspend/resume requests. */
        dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
        DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
        /* set specific cpucore */
        dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
#ifndef SUPPORT_PM2_ONLY
        /* A non-negative conf->pm overrides the PM_MAX default. */
        if (dhd->conf->pm >= 0)
                power_mode = dhd->conf->pm;
#endif /* SUPPORT_PM2_ONLY */
        if (dhd->up) {
                if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
                        dhd->early_suspended = 1;
#endif
                        /* Kernel suspended */
                        DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
                        /* Tell the sensor hub firmware that the host is suspending. */
                        shub_msreq = 1;
                        if (dhd->info->shub_enable == 1) {
                                bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
                                        iovbuf, sizeof(iovbuf));
                                if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
                                        iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
                                        DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
                                                __FUNCTION__, ret));
                                }
                        }
#endif /* SUPPORT_SENSORHUB */

#ifndef SUPPORT_PM2_ONLY
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
                                sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

#ifdef PKT_FILTER_SUPPORT
                        /* Enable packet filter,
                         * only allow unicast packet to send up
                         */
                        dhd_enable_packet_filter(1, dhd);
#endif /* PKT_FILTER_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
                        /* Stop passing all multicast while suspended; applied
                         * per-interface across the whole iflist.
                         */
                        allmulti = 0;
                        bcm_mkiovar("allmulti", (char *)&allmulti, 4,
                                iovbuf, sizeof(iovbuf));
                        for (i = 0; i < DHD_MAX_IFS; i++) {
                                if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
                                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                                sizeof(iovbuf), TRUE, i);
                        }
#endif /* PASS_ALL_MCAST_PKTS */

                        /* If DTIM skip is set up as default, force it to wake
                         * each third DTIM for better power savings.  Note that
                         * one side effect is a chance to miss BC/MC packet.
                         */
#ifdef WLTDLS
                        /* Do not set bcn_li_ditm on WFD mode */
                        if (dhd->tdls_mode) {
                                bcn_li_dtim = 0;
                        } else
#endif /* WLTDLS */
                        bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
                        bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
                                4, iovbuf, sizeof(iovbuf));
                        if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
                                TRUE, 0) < 0)
                                        DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
                        bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
                        bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
                        roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
                        bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
                        /* Disable firmware roaming during suspend */
                        bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
                        /* Don't wake for every beacon while suspended. */
                        bcn_li_bcn = 0;
                        bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
                        if (FW_SUPPORTED(dhd, ndoe)) {
                                /* enable IPv6 RA filter in  firmware during suspend */
                                nd_ra_filter = 1;
                                bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
                                        iovbuf, sizeof(iovbuf));
                                if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                        sizeof(iovbuf), TRUE, 0)) < 0)
                                        DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
                                                ret));
                        }
#ifdef DYNAMIC_SWOOB_DURATION
                        intr_width = CUSTOM_INTR_WIDTH;
                        bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
                                iovbuf, sizeof(iovbuf));
                        if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                        sizeof(iovbuf), TRUE, 0)) < 0) {
                                DHD_ERROR(("failed to set intr_width (%d)\n", ret));
                        }
#endif /* DYNAMIC_SWOOB_DURATION */
#endif /* DHD_USE_EARLYSUSPEND */
                } else {
#ifdef PKT_FILTER_SUPPORT
                        dhd->early_suspended = 0;
#endif
                        /* Kernel resumed  */
                        DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
                        /* Tell the sensor hub firmware that the host resumed. */
                        shub_msreq = 0;
                        if (dhd->info->shub_enable == 1) {
                                bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
                                        4, iovbuf, sizeof(iovbuf));
                                if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
                                        iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
                                                DHD_ERROR(("%s Sensor Hub move/stop stop:"
                                                        "failed %d\n", __FUNCTION__, ret));
                                }
                        }
#endif /* SUPPORT_SENSORHUB */


#ifdef DYNAMIC_SWOOB_DURATION
                        intr_width = 0;
                        bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
                                iovbuf, sizeof(iovbuf));
                        if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                        sizeof(iovbuf), TRUE, 0)) < 0) {
                                DHD_ERROR(("failed to set intr_width (%d)\n", ret));
                        }
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
                        /* Back to fast power-save for normal operation. */
                        power_mode = PM_FAST;
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
                                sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
                        /* disable pkt filter */
                        dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
                        allmulti = 1;
                        bcm_mkiovar("allmulti", (char *)&allmulti, 4,
                                iovbuf, sizeof(iovbuf));
                        for (i = 0; i < DHD_MAX_IFS; i++) {
                                if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
                                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                                sizeof(iovbuf), TRUE, i);
                        }
#endif /* PASS_ALL_MCAST_PKTS */

                        /* restore pre-suspend setting for dtim_skip
                         * (bcn_li_dtim was initialized to 0 above)
                         */
                        bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
                                4, iovbuf, sizeof(iovbuf));

                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
                        bcn_timeout = CUSTOM_BCN_TIMEOUT;
                        bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
                        /* NOTE(review): 2000 ms appears to be the assumed
                         * pre-suspend roam_time_thresh default — confirm.
                         */
                        roam_time_thresh = 2000;
                        bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
                        roamvar = dhd_roam_disable;
                        bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
                        bcn_li_bcn = 1;
                        bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
                                4, iovbuf, sizeof(iovbuf));
                        dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
                        if (FW_SUPPORTED(dhd, ndoe)) {
                                /* disable IPv6 RA filter in  firmware during suspend */
                                nd_ra_filter = 0;
                                bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
                                        iovbuf, sizeof(iovbuf));
                                if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
                                        sizeof(iovbuf), TRUE, 0)) < 0)
                                        DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
                                                ret));
                        }
#endif /* DHD_USE_EARLYSUSPEND */
                }
        }
        dhd_suspend_unlock(dhd);

        return 0;
}
2813
2814 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2815 {
2816         dhd_pub_t *dhdp = &dhd->pub;
2817         int ret = 0;
2818
2819         DHD_OS_WAKE_LOCK(dhdp);
2820         DHD_PERIM_LOCK(dhdp);
2821
2822         /* Set flag when early suspend was called */
2823         dhdp->in_suspend = val;
2824         if ((force || !dhdp->suspend_disable_flag) &&
2825                 dhd_support_sta_mode(dhdp))
2826         {
2827                 ret = dhd_set_suspend(val, dhdp);
2828         }
2829
2830         DHD_PERIM_UNLOCK(dhdp);
2831         DHD_OS_WAKE_UNLOCK(dhdp);
2832         return ret;
2833 }
2834
2835 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2836 static void dhd_early_suspend(struct early_suspend *h)
2837 {
2838         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2839         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2840
2841         if (dhd)
2842                 dhd_suspend_resume_helper(dhd, 1, 0);
2843 }
2844
2845 static void dhd_late_resume(struct early_suspend *h)
2846 {
2847         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2848         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2849
2850         if (dhd)
2851                 dhd_suspend_resume_helper(dhd, 0, 0);
2852 }
2853 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2854
2855 /*
2856  * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
2857  * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
2858  *
2859  *      dhd_timeout_start(&tmo, usec);
2860  *      while (!dhd_timeout_expired(&tmo))
2861  *              if (poll_something())
2862  *                      break;
2863  *      if (dhd_timeout_expired(&tmo))
2864  *              fatal();
2865  */
2866
2867 void
2868 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2869 {
2870         tmo->limit = usec;
2871         tmo->increment = 0;
2872         tmo->elapsed = 0;
2873         tmo->tick = jiffies_to_usecs(1);
2874 }
2875
/*
 * Advance the timeout state and delay the caller.  Returns 1 once the
 * accumulated delay reaches the limit set by dhd_timeout_start(), else 0.
 * Delays spin (OSL_DELAY) with exponential back-off until the interval
 * reaches one jiffy, then sleeps one jiffy per call when sleeping is
 * allowed.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
        /* Does nothing the first call */
        if (tmo->increment == 0) {
                tmo->increment = 1;
                return 0;
        }

        if (tmo->elapsed >= tmo->limit)
                return 1;

        /* Add the delay that's about to take place */
        tmo->elapsed += tmo->increment;

        /* Spin while in atomic context, or while the back-off interval is
         * still shorter than one jiffy; otherwise sleep for one jiffy.
         */
        if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
                OSL_DELAY(tmo->increment);
                /* Double the spin interval, capped at one jiffy's worth. */
                tmo->increment *= 2;
                if (tmo->increment > tmo->tick)
                        tmo->increment = tmo->tick;
        } else {
                /* Sleep for one jiffy on a private wait queue; an
                 * interruptible timed wait, no wake-up source expected.
                 */
                wait_queue_head_t delay_wait;
                DECLARE_WAITQUEUE(wait, current);
                init_waitqueue_head(&delay_wait);
                add_wait_queue(&delay_wait, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                (void)schedule_timeout(1);
                remove_wait_queue(&delay_wait, &wait);
                set_current_state(TASK_RUNNING);
        }

        return 0;
}
2909
2910 int
2911 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2912 {
2913         int i = 0;
2914
2915         if (!dhd) {
2916                 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2917                 return DHD_BAD_IF;
2918         }
2919
2920         while (i < DHD_MAX_IFS) {
2921                 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2922                         return i;
2923                 i++;
2924         }
2925
2926         return DHD_BAD_IF;
2927 }
2928
2929 struct net_device * dhd_idx2net(void *pub, int ifidx)
2930 {
2931         struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2932         struct dhd_info *dhd_info;
2933
2934         if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2935                 return NULL;
2936         dhd_info = dhd_pub->info;
2937         if (dhd_info && dhd_info->iflist[ifidx])
2938                 return dhd_info->iflist[ifidx]->net;
2939         return NULL;
2940 }
2941
2942 int
2943 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2944 {
2945         int i = DHD_MAX_IFS;
2946
2947         ASSERT(dhd);
2948
2949         if (name == NULL || *name == '\0')
2950                 return 0;
2951
2952         while (--i > 0)
2953                 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2954                                 break;
2955
2956         DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2957
2958         return i;       /* default - the primary interface */
2959 }
2960
2961 char *
2962 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2963 {
2964         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2965
2966         ASSERT(dhd);
2967
2968         if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2969                 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2970                 return "<if_bad>";
2971         }
2972
2973         if (dhd->iflist[ifidx] == NULL) {
2974                 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2975                 return "<if_null>";
2976         }
2977
2978         if (dhd->iflist[ifidx]->net)
2979                 return dhd->iflist[ifidx]->net->name;
2980
2981         return "<if_none>";
2982 }
2983
2984 uint8 *
2985 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2986 {
2987         int i;
2988         dhd_info_t *dhd = (dhd_info_t *)dhdp;
2989
2990         ASSERT(dhd);
2991         for (i = 0; i < DHD_MAX_IFS; i++)
2992         if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2993                 return dhd->iflist[i]->mac_addr;
2994
2995         return NULL;
2996 }
2997
2998
/*
 * Push the host-side multicast configuration for interface 'ifidx' down
 * to the dongle in three steps:
 *   1. "mcast_list" iovar with the device's current multicast addresses;
 *   2. "allmulti" iovar, forced on if the device has IFF_ALLMULTI set,
 *      if PASS_ALL_MCAST_PKTS applies, or if step 1 failed;
 *   3. WLC_SET_PROMISC reflecting IFF_PROMISC.
 * Failures are logged and the remaining steps still run (best effort).
 * Runs in process context; takes the netdev address lock only around
 * reads of the kernel's multicast list.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
        struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
        struct netdev_hw_addr *ha;
#else
        struct dev_mc_list *mclist;
#endif
        uint32 allmulti, cnt;

        wl_ioctl_t ioc;
        char *buf, *bufp;
        uint buflen;
        int ret;

        if (!dhd->iflist[ifidx]) {
                DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
                return;
        }
        dev = dhd->iflist[ifidx]->net;
        if (!dev)
                return;
        /* Snapshot the multicast count under the address lock; the list
         * may change before we re-take the lock below, so the copy loop
         * re-checks against this count.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
        cnt = netdev_mc_count(dev);
#else
        cnt = dev->mc_count;
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

        /* Determine initial value of allmulti flag */
        allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
        if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
                allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

        /* Send down the multicast list first. */

        /* iovar layout: "mcast_list\0" + uint32 count + count MAC addresses */
        buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
        if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
                DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
                        dhd_ifname(&dhd->pub, ifidx), cnt));
                return;
        }

        strncpy(bufp, "mcast_list", buflen - 1);
        bufp[buflen - 1] = '\0';
        bufp += strlen("mcast_list") + 1;

        cnt = htol32(cnt);
        memcpy(bufp, &cnt, sizeof(cnt));
        bufp += sizeof(cnt);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
        /* Copy at most 'cnt' addresses even if the list grew meanwhile. */
        netdev_for_each_mc_addr(ha, dev) {
                if (!cnt)
                        break;
                memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
                bufp += ETHER_ADDR_LEN;
                cnt--;
        }
#else /* LINUX < 2.6.35 */
        for (mclist = dev->mc_list; (mclist && (cnt > 0));
                        cnt--, mclist = mclist->next) {
                memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
                bufp += ETHER_ADDR_LEN;
        }
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

        memset(&ioc, 0, sizeof(ioc));
        ioc.cmd = WLC_SET_VAR;
        ioc.buf = buf;
        ioc.len = buflen;
        ioc.set = TRUE;

        ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
        if (ret < 0) {
                DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
                        dhd_ifname(&dhd->pub, ifidx), cnt));
                /* Could not program the list: fall back to allmulti if we
                 * actually had addresses to program.
                 */
                allmulti = cnt ? TRUE : allmulti;
        }

        MFREE(dhd->pub.osh, buf, buflen);

        /* Now send the allmulti setting.  This is based on the setting in the
         * net_device flags, but might be modified above to be turned on if we
         * were trying to set some addresses and dongle rejected it...
         */

        buflen = sizeof("allmulti") + sizeof(allmulti);
        if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
                DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
                return;
        }
        allmulti = htol32(allmulti);

        if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
                DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
                           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
                MFREE(dhd->pub.osh, buf, buflen);
                return;
        }


        memset(&ioc, 0, sizeof(ioc));
        ioc.cmd = WLC_SET_VAR;
        ioc.buf = buf;
        ioc.len = buflen;
        ioc.set = TRUE;

        ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
        if (ret < 0) {
                DHD_ERROR(("%s: set allmulti %d failed\n",
                           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
        }

        MFREE(dhd->pub.osh, buf, buflen);

        /* Finally, pick up the PROMISC flag as well, like the NIC driver does */

        /* 'allmulti' is reused here as the promisc on/off value. */
        allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

        allmulti = htol32(allmulti);

        memset(&ioc, 0, sizeof(ioc));
        ioc.cmd = WLC_SET_PROMISC;
        ioc.buf = &allmulti;
        ioc.len = sizeof(allmulti);
        ioc.set = TRUE;

        ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
        if (ret < 0) {
                DHD_ERROR(("%s: set promisc %d failed\n",
                           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
        }
}
3151
3152 int
3153 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3154 {
3155         char buf[32];
3156         wl_ioctl_t ioc;
3157         int ret;
3158
3159         if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3160                 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3161                 return -1;
3162         }
3163         memset(&ioc, 0, sizeof(ioc));
3164         ioc.cmd = WLC_SET_VAR;
3165         ioc.buf = buf;
3166         ioc.len = 32;
3167         ioc.set = TRUE;
3168
3169         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3170         if (ret < 0) {
3171                 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3172         } else {
3173                 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3174                 if (ifidx == 0)
3175                         memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3176         }
3177
3178         return ret;
3179 }
3180
3181 #ifdef SOFTAP
3182 extern struct net_device *ap_net_dev;
3183 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
3184 #endif
3185
3186 #ifdef DHD_PSTA
3187 /* Get psta/psr configuration configuration */
3188 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3189 {
3190         dhd_info_t *dhd = dhdp->info;
3191         return (int)dhd->psta_mode;
3192 }
3193 /* Set psta/psr configuration configuration */
3194 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3195 {
3196         dhd_info_t *dhd = dhdp->info;
3197         dhd->psta_mode = val;
3198         return 0;
3199 }
3200 #endif /* DHD_PSTA */
3201
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_ADD: creates and registers the
 * host-side net_device for an interface announced by the dongle in a
 * WLC_E_IF "add" event.  On cfg80211 kernels (>= 3.11) it also allocates
 * and wires up the wireless_dev.  Frees the event payload on every exit
 * path through 'done'.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
        dhd_info_t *dhd = handle;
        dhd_if_event_t *if_event = event_info;
        struct net_device *ndev;
        int ifidx, bssidx;
        int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
        struct wireless_dev *vwdev, *primary_wdev;
        struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

        if (event != DHD_WQ_WORK_IF_ADD) {
                DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
                return;
        }

        if (!dhd) {
                DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
                return;
        }

        if (!if_event) {
                DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
                return;
        }

        dhd_net_if_lock_local(dhd);
        DHD_OS_WAKE_LOCK(&dhd->pub);
        DHD_PERIM_LOCK(&dhd->pub);

        ifidx = if_event->event.ifidx;
        bssidx = if_event->event.bssidx;
        DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

        /* This path is for non-android case */
        /* The interface name in host and in event msg are same */
        /* if name in event msg is used to create dongle if list on host */
        ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
                if_event->mac, bssidx, TRUE, if_event->name);
        if (!ndev) {
                DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
                goto done;
        }

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
        vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
        if (unlikely(!vwdev)) {
                DHD_ERROR(("Could not allocate wireless device\n"));
                /* NOTE(review): ndev allocated above is not released on this
                 * path (no dhd_remove_if() before 'done') — looks like a
                 * leak; confirm against dhd_allocate_if() ownership rules.
                 */
                goto done;
        }
        /* Share the primary interface's wiphy with the new virtual one. */
        primary_ndev = dhd->pub.info->iflist[0]->net;
        primary_wdev = ndev_to_wdev(primary_ndev);
        vwdev->wiphy = primary_wdev->wiphy;
        vwdev->iftype = if_event->event.role;
        vwdev->netdev = ndev;
        ndev->ieee80211_ptr = vwdev;
        SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
        DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

        /* register_netdev() may sleep: drop the perimeter lock around it. */
        DHD_PERIM_UNLOCK(&dhd->pub);
        ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
        DHD_PERIM_LOCK(&dhd->pub);
        if (ret != BCME_OK) {
                DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
                dhd_remove_if(&dhd->pub, ifidx, TRUE);
                goto done;
        }
#ifdef PCIE_FULL_DONGLE
        /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
        if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
                char iovbuf[WLC_IOCTL_SMLEN];
                uint32 var_int =  1;

                memset(iovbuf, 0, sizeof(iovbuf));
                bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
                ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

                if (ret != BCME_OK) {
                        DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
                        dhd_remove_if(&dhd->pub, ifidx, TRUE);
                }
        }
#endif /* PCIE_FULL_DONGLE */

done:
        /* The work item owns the event payload: always release it here. */
        MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

        DHD_PERIM_UNLOCK(&dhd->pub);
        DHD_OS_WAKE_UNLOCK(&dhd->pub);
        dhd_net_if_unlock_local(dhd);
}
3296
3297 static void
3298 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3299 {
3300         dhd_info_t *dhd = handle;
3301         int ifidx;
3302         dhd_if_event_t *if_event = event_info;
3303
3304
3305         if (event != DHD_WQ_WORK_IF_DEL) {
3306                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3307                 return;
3308         }
3309
3310         if (!dhd) {
3311                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3312                 return;
3313         }
3314
3315         if (!if_event) {
3316                 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3317                 return;
3318         }
3319
3320         dhd_net_if_lock_local(dhd);
3321         DHD_OS_WAKE_LOCK(&dhd->pub);
3322         DHD_PERIM_LOCK(&dhd->pub);
3323
3324         ifidx = if_event->event.ifidx;
3325         DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3326
3327         DHD_PERIM_UNLOCK(&dhd->pub);
3328         dhd_remove_if(&dhd->pub, ifidx, TRUE);
3329         DHD_PERIM_LOCK(&dhd->pub);
3330
3331         MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3332
3333         DHD_PERIM_UNLOCK(&dhd->pub);
3334         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3335         dhd_net_if_unlock_local(dhd);
3336 }
3337
3338 static void
3339 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3340 {
3341         dhd_info_t *dhd = handle;
3342         dhd_if_t *ifp = event_info;
3343
3344         if (event != DHD_WQ_WORK_SET_MAC) {
3345                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3346         }
3347
3348         if (!dhd) {
3349                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3350                 return;
3351         }
3352
3353         dhd_net_if_lock_local(dhd);
3354         DHD_OS_WAKE_LOCK(&dhd->pub);
3355         DHD_PERIM_LOCK(&dhd->pub);
3356
3357 #ifdef SOFTAP
3358         {
3359                 unsigned long flags;
3360                 bool in_ap = FALSE;
3361                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3362                 in_ap = (ap_net_dev != NULL);
3363                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3364
3365                 if (in_ap)  {
3366                         DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3367                                    ifp->net->name));
3368                         goto done;
3369                 }
3370         }
3371 #endif /* SOFTAP */
3372
3373         if (ifp == NULL || !dhd->pub.up) {
3374                 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3375                 goto done;
3376         }
3377
3378         DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3379         ifp->set_macaddress = FALSE;
3380         if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3381                 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3382         else
3383                 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3384
3385 done:
3386         DHD_PERIM_UNLOCK(&dhd->pub);
3387         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3388         dhd_net_if_unlock_local(dhd);
3389 }
3390
3391 static void
3392 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3393 {
3394         dhd_info_t *dhd = handle;
3395         dhd_if_t *ifp = event_info;
3396         int ifidx;
3397
3398         if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3399                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3400                 return;
3401         }
3402
3403         if (!dhd) {
3404                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3405                 return;
3406         }
3407
3408         dhd_net_if_lock_local(dhd);
3409         DHD_OS_WAKE_LOCK(&dhd->pub);
3410         DHD_PERIM_LOCK(&dhd->pub);
3411
3412 #ifdef SOFTAP
3413         {
3414                 bool in_ap = FALSE;
3415                 unsigned long flags;
3416                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3417                 in_ap = (ap_net_dev != NULL);
3418                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3419
3420                 if (in_ap)  {
3421                         DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3422                                    ifp->net->name));
3423                         ifp->set_multicast = FALSE;
3424                         goto done;
3425                 }
3426         }
3427 #endif /* SOFTAP */
3428
3429         if (ifp == NULL || !dhd->pub.up) {
3430                 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3431                 goto done;
3432         }
3433
3434         ifidx = ifp->idx;
3435
3436
3437         _dhd_set_multicast_list(dhd, ifidx);
3438         DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3439
3440 done:
3441         DHD_PERIM_UNLOCK(&dhd->pub);
3442         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3443         dhd_net_if_unlock_local(dhd);
3444 }
3445
3446 static int
3447 dhd_set_mac_address(struct net_device *dev, void *addr)
3448 {
3449         int ret = 0;
3450
3451         dhd_info_t *dhd = DHD_DEV_INFO(dev);
3452         struct sockaddr *sa = (struct sockaddr *)addr;
3453         int ifidx;
3454         dhd_if_t *dhdif;
3455
3456         ifidx = dhd_net2idx(dhd, dev);
3457         if (ifidx == DHD_BAD_IF)
3458                 return -1;
3459
3460         dhdif = dhd->iflist[ifidx];
3461
3462         dhd_net_if_lock_local(dhd);
3463         memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3464         dhdif->set_macaddress = TRUE;
3465         dhd_net_if_unlock_local(dhd);
3466         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3467                 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3468         return ret;
3469 }
3470
3471 static void
3472 dhd_set_multicast_list(struct net_device *dev)
3473 {
3474         dhd_info_t *dhd = DHD_DEV_INFO(dev);
3475         int ifidx;
3476
3477         ifidx = dhd_net2idx(dhd, dev);
3478         if (ifidx == DHD_BAD_IF)
3479                 return;
3480
3481         dhd->iflist[ifidx]->set_multicast = TRUE;
3482         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3483                 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3484 }
3485
3486 #ifdef PROP_TXSTATUS
3487 int
3488 dhd_os_wlfc_block(dhd_pub_t *pub)
3489 {
3490         dhd_info_t *di = (dhd_info_t *)(pub->info);
3491         ASSERT(di != NULL);
3492         spin_lock_bh(&di->wlfc_spinlock);
3493         return 1;
3494 }
3495
3496 int
3497 dhd_os_wlfc_unblock(dhd_pub_t *pub)
3498 {
3499         dhd_info_t *di = (dhd_info_t *)(pub->info);
3500
3501         ASSERT(di != NULL);
3502         spin_unlock_bh(&di->wlfc_spinlock);
3503         return 1;
3504 }
3505
3506 #endif /* PROP_TXSTATUS */
3507
3508 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Maps an Ethernet type value to a printable name for TX/RX dump logging */
typedef struct {
        uint16 type;
        const char *str;
} PKTTYPE_INFO;

/* Lookup table used by _get_packet_type_str(); the final { 0, "" } entry is
 * a sentinel returned for unknown types and must remain last.
 */
static const PKTTYPE_INFO packet_type_info[] =
{
        { ETHER_TYPE_IP, "IP" },
        { ETHER_TYPE_ARP, "ARP" },
        { ETHER_TYPE_BRCM, "BRCM" },
        { ETHER_TYPE_802_1X, "802.1X" },
        { ETHER_TYPE_WAI, "WAPI" },
        { 0, ""}
};
3523
3524 static const char *_get_packet_type_str(uint16 type)
3525 {
3526         int i;
3527         int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
3528
3529         for (i = 0; i < n; i++) {
3530                 if (packet_type_info[i].type == type)
3531                         return packet_type_info[i].str;
3532         }
3533
3534         return packet_type_info[n].str;
3535 }
3536 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3537
3538 #if defined(DHD_TX_DUMP)
3539 void
3540 dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
3541 {
3542         uint8 *dump_data;
3543         uint16 protocol;
3544         char *ifname;
3545
3546         dump_data = PKTDATA(osh, pkt);
3547         protocol = (dump_data[12] << 8) | dump_data[13];
3548         ifname = ndev ? ndev->name : "N/A";
3549
3550         DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
3551
3552         if (protocol == ETHER_TYPE_802_1X) {
3553                 dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
3554         }
3555
3556 #if defined(DHD_TX_FULL_DUMP)
3557         {
3558                 int i;
3559                 uint datalen;
3560                 datalen = PKTLEN(osh, pkt);
3561
3562                 for (i = 0; i < datalen; i++) {
3563                         printk("%02X ", dump_data[i]);
3564                         if ((i & 15) == 15)
3565                                 printk("\n");
3566                 }
3567                 printk("\n");
3568         }
3569 #endif /* DHD_TX_FULL_DUMP */
3570 }
3571 #endif /* DHD_TX_DUMP */
3572
/*  This routine does not support the packet chain feature; it is currently
 *  tested only for the proxy ARP feature. Delivers a single dongle packet
 *  'p' up the Linux network stack on interface 'ifidx'.
 *  Returns BCME_OK.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
        struct sk_buff *skb;
        /* NOTE: since this routine handles exactly one packet, skbhead holds
         * at most one skb and skbprev's PKTSETNEXT branch is never reached.
         */
        void *skbhead = NULL;
        void *skbprev = NULL;
        dhd_if_t *ifp;
        ASSERT(!PKTISCHAINED(p));
        skb = PKTTONATIVE(dhdp->osh, p);

        ifp = dhdp->info->iflist[ifidx];
        skb->dev = ifp->net;
#if defined(BCM_GMAC3)
        /* Forwarder capable interfaces use WOFA based forwarding */
        if (ifp->fwdh) {
                struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
                uint16 * da = (uint16 *)(eh->ether_dhost);
                wofa_t wofa;
                ASSERT(ISALIGNED(da, 2));

                /* Only frames whose destination is unknown to the forwarder
                 * are handed to fwder_transmit(); everything else is dropped.
                 */
                wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
                if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
                        if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
                                return BCME_OK;
                        }
                }
                PKTFRMNATIVE(dhdp->osh, p);
                PKTFREE(dhdp->osh, p, FALSE);
                return BCME_OK;
        }
#endif /* BCM_GMAC3 */

        skb->protocol = eth_type_trans(skb, skb->dev);

        if (in_interrupt()) {
                /* Interrupt context: hand straight to netif_rx() */
                bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
                        __FUNCTION__, __LINE__);
                netif_rx(skb);
        } else {
                if (dhdp->info->rxthread_enabled) {
                        /* Defer to the DHD rx thread via dhd_sched_rxf() below */
                        if (!skbhead) {
                                skbhead = skb;
                        } else {
                                PKTSETNEXT(dhdp->osh, skbprev, skb);
                        }
                        skbprev = skb;
                } else {
                        /* If the receive is not processed inside an ISR,
                         * the softirqd must be woken explicitly to service
                         * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
                         * by netif_rx_ni(), but in earlier kernels, we need
                         * to do it manually.
                         */
                        bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
                                __FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
                        netif_rx_ni(skb);
#else
                        ulong flags;
                        netif_rx(skb);
                        local_irq_save(flags);
                        RAISE_RX_SOFTIRQ();
                        local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
                }
        }

        if (dhdp->info->rxthread_enabled && skbhead)
                dhd_sched_rxf(dhdp, skbhead);

        return BCME_OK;
}
3647
/*
 * Core transmit path: validates bus state, runs optional L2 filtering,
 * updates TX statistics, assigns packet priority / flowring, optionally
 * dumps the frame, then hands the packet to the bus (via wlfc when
 * proptxstatus is active). Consumes 'pktbuf' on every path, including
 * errors. Returns BCME_OK / a BCME_* or -errno error code.
 */
int BCMFASTPATH
__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
        int ret = BCME_OK;
        dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
        struct ether_header *eh = NULL;
#ifdef DHD_L2_FILTER
        dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif
#ifdef DHD_8021X_DUMP
        /* NOTE(review): 'ndev' is declared only under DHD_8021X_DUMP but is
         * used below under DHD_TX_DUMP - a build with DHD_TX_DUMP and without
         * DHD_8021X_DUMP would not compile; confirm the intended guard.
         */
        struct net_device *ndev;
#endif /* DHD_8021X_DUMP */

        /* Reject if down */
        if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
                /* free the packet here since the caller won't */
                PKTCFREE(dhdp->osh, pktbuf, TRUE);
                return -ENODEV;
        }

#ifdef PCIE_FULL_DONGLE
        if (dhdp->busstate == DHD_BUS_SUSPEND) {
                DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
                PKTFREE(dhdp->osh, pktbuf, TRUE);
                return -EBUSY;
        }
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_L2_FILTER
        /* if dhcp_unicast is enabled, we need to convert the */
        /* broadcast DHCP ACK/REPLY packets to Unicast. */
        if (ifp->dhcp_unicast) {
            uint8* mac_addr;
            uint8* ehptr = NULL;
            /* NOTE: this inner 'ret' shadows the function-level 'ret' - the
             * DHCP lookup result deliberately does not affect the outer
             * return value.
             */
            int ret;
            ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
            if (ret == BCME_OK) {
                /*  if given mac address having valid entry in sta list
                 *  copy the given mac address, and return with BCME_OK
                */
                if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
                    ehptr = PKTDATA(dhdp->osh, pktbuf);
                    bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
                }
            }
        }

        /* Drop gratuitous ARP frames on AP-role interfaces when configured */
        if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
            if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
                        PKTCFREE(dhdp->osh, pktbuf, TRUE);
                        return BCME_ERROR;
            }
        }

        if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
                ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

                /* Drop the packets if l2 filter has processed it already
                 * otherwise continue with the normal path
                 */
                if (ret == BCME_OK) {
                        PKTCFREE(dhdp->osh, pktbuf, TRUE);
                        return BCME_ERROR;
                }
        }
#endif /* DHD_L2_FILTER */
        /* Update multicast statistic */
        if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
                uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
                eh = (struct ether_header *)pktdata;

                if (ETHER_ISMULTI(eh->ether_dhost))
                        dhdp->tx_multicast++;
                if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
                        atomic_inc(&dhd->pend_8021x_cnt);
#ifdef DHD_DHCP_DUMP
                if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
                        uint16 dump_hex;
                        uint16 source_port;
                        uint16 dest_port;
                        uint16 udp_port_pos;
                        uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
                        /* IHL field (low nibble of the first IP byte) in bytes */
                        uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
                        struct net_device *net;
                        char *ifname;

                        net = dhd_idx2net(dhdp, ifidx);
                        ifname = net ? net->name : "N/A";
                        udp_port_pos = ETHER_HDR_LEN + ip_header_len;
                        /* NOTE(review): only ETHER_HDR_LEN was length-checked
                         * above, yet bytes up to udp_port_pos+250 are read
                         * below - presumably DHCP frames are always long
                         * enough; verify against short/crafted packets.
                         */
                        source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
                        dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
                        /* UDP port 68 (0x44): DHCP client side */
                        if (source_port == 0x0044 || dest_port == 0x0044) {
                                /* DHCP option 53 (message type) at fixed offset */
                                dump_hex = (pktdata[udp_port_pos+249] << 8) |
                                        pktdata[udp_port_pos+250];
                                if (dump_hex == 0x0101) {
                                        DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
                                } else if (dump_hex == 0x0102) {
                                        DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
                                } else if (dump_hex == 0x0103) {
                                        DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
                                } else if (dump_hex == 0x0105) {
                                        DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
                                } else {
                                        DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
                                }
#ifdef DHD_LOSSLESS_ROAMING
                                if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
                                        DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
                                }
#endif /* DHD_LOSSLESS_ROAMING */
                                DHD_ERROR(("\n"));
                        } else if (source_port == 0x0043 || dest_port == 0x0043) {
                                /* UDP port 67 (0x43): DHCP server side */
                                DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
                        }
                }
#endif /* DHD_DHCP_DUMP */
        } else {
                        /* Runt frame (shorter than an Ethernet header): drop */
                        PKTCFREE(dhdp->osh, pktbuf, TRUE);
                        return BCME_ERROR;
        }

        /* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
        if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
        {
#ifdef QOS_MAP_SET
                pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
#else
                pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
        }


#ifdef PCIE_FULL_DONGLE
        /*
         * Lkup the per interface hash table, for a matching flowring. If one is not
         * available, allocate a unique flowid and add a flowring entry.
         * The found or newly created flowid is placed into the pktbuf's tag.
         */
        ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
        if (ret != BCME_OK) {
                PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
                return ret;
        }
#endif

#if defined(DHD_TX_DUMP)
        ndev = dhd_idx2net(dhdp, ifidx);
        dhd_tx_dump(ndev, dhdp->osh, pktbuf);
#endif
        /* terence 20150901: Micky add to ajust the 802.1X priority */
        /* Set the 802.1X packet with the highest priority 7 */
        if (dhdp->conf->pktprio8021x >= 0)
                pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
        if (dhd_wlfc_is_supported(dhdp)) {
                /* store the interface ID */
                DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

                /* store destination MAC in the tag as well */
                DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

                /* decide which FIFO this packet belongs to */
                if (ETHER_ISMULTI(eh->ether_dhost))
                        /* one additional queue index (highest AC + 1) is used for bc/mc queue */
                        DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
                else
                        DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
        } else
#endif /* PROP_TXSTATUS */
        {
                /* If the protocol uses a data header, apply it */
                dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
        }

        /* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
        dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef PROP_TXSTATUS
        {
                /* Try wlfc first; fall back to direct bus TX if unsupported */
                if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
                        dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
                        /* non-proptxstatus way */
#ifdef BCMPCIE
                        ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
                        ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
                }
        }
#else
#ifdef BCMPCIE
        ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
        ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */

        return ret;
}
3851
3852 int BCMFASTPATH
3853 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3854 {
3855         int ret = 0;
3856         unsigned long flags;
3857
3858         DHD_GENERAL_LOCK(dhdp, flags);
3859         if (dhdp->busstate == DHD_BUS_DOWN ||
3860                         dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3861                 DHD_ERROR(("%s: returning as busstate=%d\n",
3862                         __FUNCTION__, dhdp->busstate));
3863                 DHD_GENERAL_UNLOCK(dhdp, flags);
3864                 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3865                 return -ENODEV;
3866         }
3867         dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
3868         DHD_GENERAL_UNLOCK(dhdp, flags);
3869
3870 #ifdef DHD_PCIE_RUNTIMEPM
3871         if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
3872                 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3873                 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3874                 ret = -EBUSY;
3875                 goto exit;
3876         }
3877 #endif /* DHD_PCIE_RUNTIMEPM */
3878
3879         ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3880
3881 #ifdef DHD_PCIE_RUNTIMEPM
3882 exit:
3883 #endif
3884         DHD_GENERAL_LOCK(dhdp, flags);
3885         dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
3886         DHD_GENERAL_UNLOCK(dhdp, flags);
3887         return ret;
3888 }
3889
3890 int BCMFASTPATH
3891 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3892 {
3893         int ret;
3894         uint datalen;
3895         void *pktbuf;
3896         dhd_info_t *dhd = DHD_DEV_INFO(net);
3897         dhd_if_t *ifp = NULL;
3898         int ifidx;
3899         unsigned long flags;
3900 #ifdef WLMEDIA_HTSF
3901         uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3902 #else
3903         uint8 htsfdlystat_sz = 0;
3904 #endif 
3905 #ifdef DHD_WMF
3906         struct ether_header *eh;
3907         uint8 *iph;
3908 #endif /* DHD_WMF */
3909
3910         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3911
3912
3913 #ifdef PCIE_FULL_DONGLE
3914         DHD_GENERAL_LOCK(&dhd->pub, flags);
3915         dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3916         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3917 #endif /* PCIE_FULL_DONGLE */
3918
3919 #ifdef DHD_PCIE_RUNTIMEPM
3920         if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3921                 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
3922                 /* stop the network queue temporarily until resume done */
3923                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3924                 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3925                         dhd_bus_stop_queue(dhd->pub.bus);
3926                 }
3927                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3928                 dhd_os_busbusy_wake(&dhd->pub);
3929                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3930 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3931                 return -ENODEV;
3932 #else
3933                 return NETDEV_TX_BUSY;
3934 #endif
3935         }
3936 #endif /* DHD_PCIE_RUNTIMEPM */
3937
3938         DHD_GENERAL_LOCK(&dhd->pub, flags);
3939 #ifdef PCIE_FULL_DONGLE
3940         if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3941                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3942                 dhd_os_busbusy_wake(&dhd->pub);
3943                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3944 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3945                 return -ENODEV;
3946 #else
3947                 return NETDEV_TX_BUSY;
3948 #endif
3949         }
3950 #endif /* PCIE_FULL_DONGLE */
3951
3952         DHD_OS_WAKE_LOCK(&dhd->pub);
3953         DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3954
3955         /* Reject if down */
3956         if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3957                 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3958                 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3959                         __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3960                 netif_stop_queue(net);
3961                 /* Send Event when bus down detected during data session */
3962                 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3963                         DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3964                         dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3965                         net_os_send_hang_message(net);
3966                 }
3967 #ifdef PCIE_FULL_DONGLE
3968                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3969                 dhd_os_busbusy_wake(&dhd->pub);
3970                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3971 #endif /* PCIE_FULL_DONGLE */
3972                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3973                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3974 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3975                 return -ENODEV;
3976 #else
3977                 return NETDEV_TX_BUSY;
3978 #endif
3979         }
3980
3981         ifp = DHD_DEV_IFP(net);
3982         ifidx = DHD_DEV_IFIDX(net);
3983         BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
3984
3985         if (ifidx == DHD_BAD_IF) {
3986                 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3987                 netif_stop_queue(net);
3988 #ifdef PCIE_FULL_DONGLE
3989                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3990                 dhd_os_busbusy_wake(&dhd->pub);
3991                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3992 #endif /* PCIE_FULL_DONGLE */
3993                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3994                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3995 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3996                 return -ENODEV;
3997 #else
3998                 return NETDEV_TX_BUSY;
3999 #endif
4000         }
4001         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4002
4003         ASSERT(ifidx == dhd_net2idx(dhd, net));
4004         ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4005
4006         bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4007
4008         /* re-align socket buffer if "skb->data" is odd address */
4009         if (((unsigned long)(skb->data)) & 0x1) {
4010                 unsigned char *data = skb->data;
4011                 uint32 length = skb->len;
4012                 PKTPUSH(dhd->pub.osh, skb, 1);
4013                 memmove(skb->data, data, length);
4014                 PKTSETLEN(dhd->pub.osh, skb, length);
4015         }
4016
4017         datalen  = PKTLEN(dhd->pub.osh, skb);
4018
4019         /* Make sure there's enough room for any header */
4020         if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4021                 struct sk_buff *skb2;
4022
4023                 DHD_INFO(("%s: insufficient headroom\n",
4024                           dhd_ifname(&dhd->pub, ifidx)));
4025                 dhd->pub.tx_realloc++;
4026
4027                 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4028                 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4029
4030                 dev_kfree_skb(skb);
4031                 if ((skb = skb2) == NULL) {
4032                         DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4033                                    dhd_ifname(&dhd->pub, ifidx)));
4034                         ret = -ENOMEM;
4035                         goto done;
4036                 }
4037                 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4038         }
4039
4040         /* Convert to packet */
4041         if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4042                 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4043                            dhd_ifname(&dhd->pub, ifidx)));
4044                 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4045                 dev_kfree_skb_any(skb);
4046                 ret = -ENOMEM;
4047                 goto done;
4048         }
4049
4050 #if defined(WLMEDIA_HTSF)
4051         if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4052                 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4053                 struct ether_header *eh = (struct ether_header *)pktdata;
4054
4055                 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4056                         (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4057                         eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4058                 }
4059         }
4060 #endif 
4061
4062 #ifdef DHD_WMF
4063         eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4064         iph = (uint8 *)eh + ETHER_HDR_LEN;
4065
4066         /* WMF processing for multicast packets
4067          * Only IPv4 packets are handled
4068          */
4069         if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4070                 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4071                 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4072 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4073                 void *sdu_clone;
4074                 bool ucast_convert = FALSE;
4075 #ifdef DHD_UCAST_UPNP
4076                 uint32 dest_ip;
4077
4078                 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4079                 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4080 #endif /* DHD_UCAST_UPNP */
4081 #ifdef DHD_IGMP_UCQUERY
4082                 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4083                         (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4084                         (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4085 #endif /* DHD_IGMP_UCQUERY */
4086                 if (ucast_convert) {
4087                         dhd_sta_t *sta;
4088 #ifdef PCIE_FULL_DONGLE
4089                         unsigned long flags;
4090 #endif
4091                         struct list_head snapshot_list;
4092                         struct list_head *wmf_ucforward_list;
4093
4094                         ret = NETDEV_TX_OK;
4095
4096                         /* For non BCM_GMAC3 platform we need a snapshot sta_list to
4097                          * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
4098                          */
4099                         wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4100
4101                         /* Convert upnp/igmp query to unicast for each assoc STA */
4102                         list_for_each_entry(sta, wmf_ucforward_list, list) {
4103                                 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4104                                         ret = WMF_NOP;
4105                                         break;
4106                                 }
4107                                 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4108                         }
4109                         DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4110
4111 #ifdef PCIE_FULL_DONGLE
4112                         DHD_GENERAL_LOCK(&dhd->pub, flags);
4113                         dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4114                         dhd_os_busbusy_wake(&dhd->pub);
4115                         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4116 #endif /* PCIE_FULL_DONGLE */
4117                         DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4118                         DHD_OS_WAKE_UNLOCK(&dhd->pub);
4119
4120                         if (ret == NETDEV_TX_OK)
4121                                 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4122
4123                         return ret;
4124                 } else
4125 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4126                 {
4127                         /* There will be no STA info if the packet is coming from LAN host
4128                          * Pass as NULL
4129                          */
4130                         ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4131                         switch (ret) {
4132                         case WMF_TAKEN:
4133                         case WMF_DROP:
4134                                 /* Either taken by WMF or we should drop it.
4135                                  * Exiting send path
4136                                  */
4137 #ifdef PCIE_FULL_DONGLE
4138                                 DHD_GENERAL_LOCK(&dhd->pub, flags);
4139                                 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4140                                 dhd_os_busbusy_wake(&dhd->pub);
4141                                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4142 #endif /* PCIE_FULL_DONGLE */
4143                                 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4144                                 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4145                                 return NETDEV_TX_OK;
4146                         default:
4147                                 /* Continue the transmit path */
4148                                 break;
4149                         }
4150                 }
4151         }
4152 #endif /* DHD_WMF */
4153 #ifdef DHD_PSTA
4154         /* PSR related packet proto manipulation should be done in DHD
4155          * since dongle doesn't have complete payload
4156          */
4157         if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4158                 ifidx, &pktbuf, TRUE) < 0)) {
4159                         DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4160                                 dhd_ifname(&dhd->pub, ifidx)));
4161         }
4162 #endif /* DHD_PSTA */
4163
4164 #ifdef DHDTCPACK_SUPPRESS
4165         if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4166                 /* If this packet has been hold or got freed, just return */
4167                 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4168                         ret = 0;
4169                         goto done;
4170                 }
4171         } else {
4172                 /* If this packet has replaced another packet and got freed, just return */
4173                 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4174                         ret = 0;
4175                         goto done;
4176                 }
4177         }
4178 #endif /* DHDTCPACK_SUPPRESS */
4179
4180         /* no segmented SKB support (Kernel-3.18.y) */
4181         if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4182                 PKTSETLINK(skb, NULL);
4183         }
4184
4185         ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4186
4187 done:
4188         if (ret) {
4189                 ifp->stats.tx_dropped++;
4190                 dhd->pub.tx_dropped++;
4191         } else {
4192
4193 #ifdef PROP_TXSTATUS
4194                 /* tx_packets counter can counted only when wlfc is disabled */
4195                 if (!dhd_wlfc_is_supported(&dhd->pub))
4196 #endif
4197                 {
4198                         dhd->pub.tx_packets++;
4199                         ifp->stats.tx_packets++;
4200                         ifp->stats.tx_bytes += datalen;
4201                 }
4202         }
4203
4204 #ifdef PCIE_FULL_DONGLE
4205         DHD_GENERAL_LOCK(&dhd->pub, flags);
4206         dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4207         dhd_os_busbusy_wake(&dhd->pub);
4208         DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4209 #endif /* PCIE_FULL_DONGLE */
4210
4211         DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4212         DHD_OS_WAKE_UNLOCK(&dhd->pub);
4213         BUZZZ_LOG(START_XMIT_END, 0);
4214
4215         /* Return ok: we always eat the packet */
4216 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4217         return 0;
4218 #else
4219         return NETDEV_TX_OK;
4220 #endif
4221 }
4222
4223
4224 void
4225 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4226 {
4227         struct net_device *net;
4228         dhd_info_t *dhd = dhdp->info;
4229         int i;
4230
4231         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4232
4233         ASSERT(dhd);
4234
4235 #ifdef DHD_LOSSLESS_ROAMING
4236         /* block flowcontrol during roaming */
4237         if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4238                 return;
4239         }
4240 #endif
4241
4242         if (ifidx == ALL_INTERFACES) {
4243                 /* Flow control on all active interfaces */
4244                 dhdp->txoff = state;
4245                 for (i = 0; i < DHD_MAX_IFS; i++) {
4246                         if (dhd->iflist[i]) {
4247                                 net = dhd->iflist[i]->net;
4248                                 if (state == ON)
4249                                         netif_stop_queue(net);
4250                                 else
4251                                         netif_wake_queue(net);
4252                         }
4253                 }
4254         } else {
4255                 if (dhd->iflist[ifidx]) {
4256                         net = dhd->iflist[ifidx]->net;
4257                         if (state == ON)
4258                                 netif_stop_queue(net);
4259                         else
4260                                 netif_wake_queue(net);
4261                 }
4262         }
4263 }
4264
4265
4266 #ifdef DHD_WMF
4267 bool
4268 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
4269 {
4270         dhd_info_t *dhd = dhdp->info;
4271
4272         return dhd->rxthread_enabled;
4273 }
4274 #endif /* DHD_WMF */
4275
/**
 * Called when a frame is received by the dongle on interface 'ifidx'.
 *
 * Walks the chain of up to 'numpkt' packets linked via PKTNEXT, applies the
 * per-packet filters/forwarding stages (L2 filter, WMF, PSTA, intra-BSS
 * forwarding), separates firmware event packets (ETHER_TYPE_BRCM) from data,
 * updates counters and finally hands data frames to the network stack —
 * either directly, via the RX thread, or via the load-balancer path,
 * depending on context and build options.
 *
 * @param dhdp   public DHD context
 * @param ifidx  interface index the frames arrived on
 * @param pktbuf head of the received packet chain
 * @param numpkt number of packets in the chain
 * @param chan   receive channel tag (unused in this path)
 */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	uchar *eth;
	uint len;
	void *data, *pnext = NULL;
	int i;
	dhd_if_t *ifp;
	wl_event_msg_t event;
	int tout_rx = 0;    /* wake-lock timeout to request for data frames */
	int tout_ctrl = 0;  /* wake-lock timeout to request for event frames */
	void *skbhead = NULL;  /* head of list accumulated for the RX thread */
	void *skbprev = NULL;
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
	char *dump_data;
	uint16 protocol;
	char *ifname;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		struct ether_header *eh;

		/* Detach the current packet from the chain before processing. */
		pnext = PKTNEXT(dhdp->osh, pktbuf);
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);

		/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
		{
			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
			__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}


#ifdef PROP_TXSTATUS
		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send header only packet when
			there is an urgent message but no packet to
			piggy-back on
			*/
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}
#endif
#ifdef DHD_L2_FILTER
		/* If block_ping is enabled drop the ping packet */
		if (ifp->block_ping) {
			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* Drop gratuitous ARP on STA-role interfaces when configured. */
		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
			if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);

			/* Drop the packets if l2 filter has processed it already
			 * otherwise continue with the normal path
			 */
			if (ret == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, TRUE);
				continue;
			}
		}
#endif /* DHD_L2_FILTER */
#ifdef DHD_WMF
		/* WMF processing for multicast packets */
		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
			dhd_sta_t *sta;
			int ret;

			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
			switch (ret) {
				case WMF_TAKEN:
					/* The packet is taken by WMF. Continue to next iteration */
					continue;
				case WMF_DROP:
					/* Packet DROP decision by WMF. Toss it */
					DHD_ERROR(("%s: WMF decides to drop packet\n",
						__FUNCTION__));
					PKTCFREE(dhdp->osh, pktbuf, FALSE);
					continue;
				default:
					/* Continue the transmit path */
					break;
			}
		}
#endif /* DHD_WMF */

#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
		/* Convert the OSL packet to a native sk_buff for the stack. */
		skb = PKTTONATIVE(dhdp->osh, pktbuf);

		ASSERT(ifp);
		skb->dev = ifp->net;

#ifdef DHD_PSTA
		if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
				DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
					dhd_ifname(dhdp, ifidx)));
		}
#endif /* DHD_PSTA */

#ifdef PCIE_FULL_DONGLE
		/* Intra-BSS forwarding on AP/P2P-GO interfaces (unless isolated):
		 * unicast to a known associated STA is sent back out over the air
		 * instead of up the stack; multicast is duplicated and sent both ways.
		 */
		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
			(!ifp->ap_isolate)) {
			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
			if (ETHER_ISUCAST(eh->ether_dhost)) {
				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
					dhd_sendpkt(dhdp, ifidx, pktbuf);
					continue;
				}
			} else {
				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
				if (npktbuf)
					dhd_sendpkt(dhdp, ifidx, npktbuf);
			}
		}
#endif /* PCIE_FULL_DONGLE */

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
		dump_data = skb->data;
		protocol = (dump_data[12] << 8) | dump_data[13];
		ifname = skb->dev ? skb->dev->name : "N/A";
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
#ifdef DHD_8021X_DUMP
		if (protocol == ETHER_TYPE_802_1X) {
			dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
		}
#endif /* DHD_8021X_DUMP */
#ifdef DHD_DHCP_DUMP
		/* Debug-only DHCP message classification by UDP port and the
		 * DHCP message-type option byte at a fixed offset.
		 */
		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;

			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
			if (source_port == 0x0044 || dest_port == 0x0044) {
				dump_hex = (dump_data[udp_port_pos+249] << 8) |
					dump_data[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
				}
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
		DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
		if (protocol != ETHER_TYPE_BRCM) {
			if (dump_data[0] == 0xFF) {
				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));

				if ((dump_data[12] == 8) &&
					(dump_data[13] == 6)) {
					DHD_ERROR(("%s: ARP %d\n",
						__FUNCTION__, dump_data[0x15]));
				}
			} else if (dump_data[0] & 1) {
				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(dump_data)));
			}
#ifdef DHD_RX_FULL_DUMP
			{
				int k;
				for (k = 0; k < skb->len; k++) {
					printk("%02X ", dump_data[k]);
					if ((k & 15) == 15)
						printk("\n");
				}
				printk("\n");
			}
#endif /* DHD_RX_FULL_DUMP */
		}
#endif /* DHD_RX_DUMP */

		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST) {
			dhd->pub.rx_multicast++;
			ifp->stats.multicast++;
		}

		/* Undo the pull done by eth_type_trans() (see comment above). */
		skb->data = eth;
		skb->len = len;

#ifdef WLMEDIA_HTSF
		dhd_htsf_addrxts(dhdp, pktbuf);
#endif
		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets and then discard them */
		memset(&event, 0, sizeof(event));
		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
			dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
			skb_mac_header(skb),
#else
			skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
			&event,
			&data);

			wl_event_to_host_order(&event);
			if (!tout_ctrl)
				tout_ctrl = DHD_PACKET_TIMEOUT_MS;

#if defined(PNO_SUPPORT)
			if (event.event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to garantee that Kernel not suspended */
				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
			}
#endif /* PNO_SUPPORT */

#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
#else
			PKTFREE(dhdp->osh, pktbuf, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		} else {
			tout_rx = DHD_PACKET_TIMEOUT_MS;

#ifdef PROP_TXSTATUS
			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
#endif /* PROP_TXSTATUS */
		}

		/* Re-read ifp: dhd_wl_host_event() may have updated ifidx. */
		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
		ifp = dhd->iflist[ifidx];

		if (ifp->net)
			ifp->net->last_rx = jiffies;

		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
			dhdp->dstats.rx_bytes += skb->len;
			dhdp->rx_packets++; /* Local count */
			ifp->stats.rx_bytes += skb->len;
			ifp->stats.rx_packets++;
		}

		if (in_interrupt()) {
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif /* DHD_LB && DHD_LB_RXP */
			DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		} else {
			if (dhd->rxthread_enabled) {
				/* Queue the skb for the RX thread; the whole list
				 * is handed to dhd_sched_rxf() after the loop.
				 */
				if (!skbhead)
					skbhead = skb;
				else
					PKTSETNEXT(dhdp->osh, skbprev, skb);
				skbprev = skb;
			} else {

				/* If the receive is not processed inside an ISR,
				 * the softirqd must be woken explicitly to service
				 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
				 * by netif_rx_ni(), but in earlier kernels, we need
				 * to do it manually.
				 */
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);

#if defined(DHD_LB) && defined(DHD_LB_RXP)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_receive_skb(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx_ni(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
				ulong flags;
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#endif /* DHD_LB && DHD_LB_RXP */
			}
		}
	}

	if (dhd->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	/* Arm wake-lock timeouts so the host stays awake long enough to
	 * consume the delivered data/events.
	 */
	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
	DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
}
4633
/* OS-abstraction hook for received events; the Linux port needs no work here. */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Intentionally empty on Linux. */
}
4640
/*
 * TX completion callback: called by the bus layer when a previously
 * submitted packet has been handed to (or rejected by) the dongle.
 *
 * Pops the protocol header, decrements the pending-802.1X counter for
 * EAPOL frames, and (when proprietary TX status / WLFC is active)
 * accounts the packet against the originating interface's statistics.
 *
 * @param dhdp    public DHD context
 * @param txp     the completed packet (OSL packet handle)
 * @param success TRUE if the dongle accepted the packet
 */
void
dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh;
	uint16 type;

	/* Remove the protocol (BDC/msgbuf) header so the Ethernet header is visible. */
	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);

	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
	type  = ntoh16(eh->ether_type);

	/* An EAPOL frame has completed: one fewer 802.1X frame in flight. */
	if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
		atomic_dec(&dhd->pend_8021x_cnt);

#ifdef PROP_TXSTATUS
	/* With WLFC enabled, per-interface TX counters are updated here at
	 * completion time rather than at submit time (see the TX path).
	 */
	if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
		dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
		uint datalen  = PKTLEN(dhd->pub.osh, txp);
		if (ifp != NULL) {
			if (success) {
				dhd->pub.tx_packets++;
				ifp->stats.tx_packets++;
				ifp->stats.tx_bytes += datalen;
			} else {
				ifp->stats.tx_dropped++;
			}
		}
	}
#endif
}
4672
4673 static struct net_device_stats *
4674 dhd_get_stats(struct net_device *net)
4675 {
4676         dhd_info_t *dhd = DHD_DEV_INFO(net);
4677         dhd_if_t *ifp;
4678         int ifidx;
4679
4680         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4681
4682         ifidx = dhd_net2idx(dhd, net);
4683         if (ifidx == DHD_BAD_IF) {
4684                 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4685
4686                 memset(&net->stats, 0, sizeof(net->stats));
4687                 return &net->stats;
4688         }
4689
4690         ifp = dhd->iflist[ifidx];
4691         ASSERT(dhd && ifp);
4692
4693         if (dhd->pub.up) {
4694                 /* Use the protocol to get dongle stats */
4695                 dhd_prot_dstats(&dhd->pub);
4696         }
4697         return &ifp->stats;
4698 }
4699
/*
 * Dedicated watchdog thread (used instead of running dhd_bus_watchdog()
 * directly in timer context when dhd_watchdog_prio >= 0).
 *
 * Sleeps on tsk->sema; each up() from dhd_watchdog() triggers one tick:
 * run the bus watchdog, bump the tick counter, and re-arm dhd->timer,
 * compensating the interval for the time this tick itself consumed.
 * Exits when tsk->terminated is set or the semaphore wait is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		/* Clamp the requested RT priority into the valid SCHED_FIFO range. */
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
			SMP_RD_BARRIER_DEPENDS();
			/* NOTE(review): this break leaves the thread with the WD
			 * wake lock taken and never released here — presumably the
			 * teardown path drops it; verify against dhd_detach().
			 */
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				dhd_bus_watchdog(&dhd->pub);

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
#ifdef DHD_L2_FILTER
				dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next interval
				 * by however long this tick took (never below zero).
				 */
				if (dhd->wd_timer_valid) {
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		} else {
			/* Semaphore wait was interrupted by a signal: shut down. */
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4756
/*
 * Watchdog timer callback (runs in timer/softirq context).
 *
 * If the dedicated watchdog thread exists, just wake it and return — the
 * thread does the real work. Otherwise run the bus watchdog inline here
 * and re-arm the timer. Skipped entirely while the dongle is in reset or
 * the bus is suspending.
 */
static void dhd_watchdog(ulong data)
{
	dhd_info_t *dhd = (dhd_info_t *)data;
	unsigned long flags;

	if (dhd->pub.dongle_reset) {
		return;
	}

	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
		return;
	}

	/* Delegate to the watchdog thread when one is running. */
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}

	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

#ifdef DHD_L2_FILTER
	dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
4792
4793 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Runtime-PM state thread: mirrors dhd_watchdog_thread() for PCIe runtime
 * power management. Each up() on tsk->sema (from dhd_runtimepm()) runs one
 * dhd_runtimepm_state() evaluation and re-arms dhd->rpm_timer, compensating
 * the interval for the time spent in this tick. Exits on tsk->terminated
 * or an interrupted semaphore wait.
 */
static int
dhd_rpm_state_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				/* Only evaluate runtime-PM state while the interface is up. */
				if (dhd->pub.up) {
					dhd_runtimepm_state(&dhd->pub);
				}

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next interval
				 * by the time this tick consumed (never below zero).
				 */
				if (dhd->rpm_timer_valid) {
					mod_timer(&dhd->rpm_timer,
						jiffies +
						msecs_to_jiffies(dhd_runtimepm_ms) -
						min(msecs_to_jiffies(dhd_runtimepm_ms),
							time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			/* Interrupted by a signal: shut the thread down. */
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4837
4838 static void dhd_runtimepm(ulong data)
4839 {
4840         dhd_info_t *dhd = (dhd_info_t *)data;
4841
4842         if (dhd->pub.dongle_reset) {
4843                 return;
4844         }
4845
4846         if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4847                 up(&dhd->thr_rpm_ctl.sema);
4848                 return;
4849         }
4850 }
4851
/* Disable runtime PM: stop the RPM tick first so no new idle transition is
 * scheduled, then force the PCIe bus awake out of any suspended state.
 */
void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
	dhd_os_runtimepm_timer(dhdp, 0);
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
	DHD_ERROR(("DHD Runtime PM Disabled \n"));
}
4858
4859 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
4860 {
4861         dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
4862         DHD_ERROR(("DHD Runtime PM Enabled \n"));
4863 }
4864
4865 #endif /* DHD_PCIE_RUNTIMEPM */
4866
4867
4868 #ifdef ENABLE_ADAPTIVE_SCHED
4869 static void
4870 dhd_sched_policy(int prio)
4871 {
4872         struct sched_param param;
4873         if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
4874                 param.sched_priority = 0;
4875                 setScheduler(current, SCHED_NORMAL, &param);
4876         } else {
4877                 if (get_scheduler_policy(current) != SCHED_FIFO) {
4878                         param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
4879                         setScheduler(current, SCHED_FIFO, &param);
4880                 }
4881         }
4882 }
4883 #endif /* ENABLE_ADAPTIVE_SCHED */
4884 #ifdef DEBUG_CPU_FREQ
4885 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
4886 {
4887         dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
4888         struct cpufreq_freqs *freq = data;
4889         if (dhd) {
4890                 if (!dhd->new_freq)
4891                         goto exit;
4892                 if (val == CPUFREQ_POSTCHANGE) {
4893                         DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
4894                                 freq->new, freq->cpu));
4895                         *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
4896                 }
4897         }
4898 exit:
4899         return 0;
4900 }
4901 #endif /* DEBUG_CPU_FREQ */
/* Kernel thread servicing the bus DPC (deferred procedure call) when DHD is
 * configured to run the DPC in a thread instead of a tasklet.  Woken via
 * binary_sema_up() from dhd_sched_dpc(), which takes a wake lock that this
 * thread releases after draining the bus.  Exits when tsk->terminated is set
 * or the semaphore wait fails.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		/* Promote to RT priority, clamped to MAX_RT_PRIO-1 */
		struct sched_param param;
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	/* Pin the DPC thread to the build-time configured core */
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#else
	/* Optionally pin to the core requested in config.txt (negative = unset) */
	if (dhd->pub.conf->dpc_cpucore >= 0) {
		printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
		set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
	}
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			/* Pair with the barrier on the side that sets terminated */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
#ifdef DEBUG_DPC_THREAD_WATCHDOG
				int resched_cnt = 0;
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				/* Hold off the watchdog while the bus DPC is draining */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
#ifdef DEBUG_DPC_THREAD_WATCHDOG
					/* Yield briefly if the DPC keeps rescheduling itself */
					resched_cnt++;
					if (resched_cnt > MAX_RESCHED_CNT) {
						DHD_INFO(("%s Calling msleep to"
							"let other processes run. \n",
							__FUNCTION__));
						dhd->pub.dhd_bug_on = true;
						resched_cnt = 0;
						OSL_SLEEP(1);
					}
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Releases the wake lock taken in dhd_sched_dpc() */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			} else {
				/* Bus reported down: stop it cleanly if still up */
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
4973
/* Kernel thread delivering received packets to the network stack.  Packets
 * are queued by dhd_sched_rxf() (which takes a wake lock and ups tsk->sema);
 * this thread dequeues chains, unlinks each packet, hands it to netif_rx(_ni)
 * and releases the wake lock.  Exits when tsk->terminated is set or the
 * semaphore wait is interrupted.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		/* Promote to RT priority, clamped to MAX_RT_PRIO-1 */
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */

	/*  signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			/* Pair with the barrier on the side that sets terminated */
			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			/* Spurious wakeup or queue drained by a previous pass */
			if (skb == NULL) {
				continue;
			}
			/* Walk the packet chain, delivering one packet at a time */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				/* Unlink before handing the packet to the stack */
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically sleep 1ms so producers/consumers can catch up */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			/* Releases the wake lock taken in dhd_sched_rxf() */
			DHD_OS_WAKE_UNLOCK(pub);
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5054
5055 #ifdef BCMPCIE
/* Re-enable DPC processing after dhd_dpc_kill(): re-initialize the load
 * balancing RX pending queue and re-enable any tasklets that were disabled.
 * The atomic count==1 checks guard against unbalanced tasklet_enable() calls
 * (tasklet_disable() increments the count; enable only if exactly one
 * disable is outstanding).
 */
void dhd_dpc_enable(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (!dhdp || !dhdp->info)
		return;
	dhd = dhdp->info;

#ifdef DHD_LB
#ifdef DHD_LB_RXP
	/* Reset the RX pending queue that dhd_dpc_kill() purged */
	__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_TXC
	if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
	if (atomic_read(&dhd->tasklet.count) ==  1)
		tasklet_enable(&dhd->tasklet);
}
5080 #endif /* BCMPCIE */
5081
5082
5083 #ifdef BCMPCIE
/* Stop all DPC processing: disable and kill the main DPC tasklet (only when
 * no DPC thread owns the work, i.e. thr_pid < 0), purge pending load-balance
 * RX packets and kill the load-balance completion tasklets.  Counterpart of
 * dhd_dpc_enable().
 */
void
dhd_dpc_kill(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		return;
	}

	dhd = dhdp->info;

	if (!dhd) {
		return;
	}

	/* Tasklet mode only; in thread mode the DPC thread owns the work */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		tasklet_disable(&dhd->tasklet);
		tasklet_kill(&dhd->tasklet);
		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
	}
#if defined(DHD_LB)
#ifdef DHD_LB_RXP
	/* Drop any RX packets still awaiting load-balanced processing */
	__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_disable(&dhd->tx_compl_tasklet);
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_disable(&dhd->rx_compl_tasklet);
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
}
5119 #endif /* BCMPCIE */
5120
5121 static void
5122 dhd_dpc(ulong data)
5123 {
5124         dhd_info_t *dhd;
5125
5126         dhd = (dhd_info_t *)data;
5127
5128         /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5129          * down below , wake lock is set,
5130          * the tasklet is initialized in dhd_attach()
5131          */
5132         /* Call bus dpc unless it indicated down (then clean stop) */
5133         if (dhd->pub.busstate != DHD_BUS_DOWN) {
5134                 if (dhd_bus_dpc(dhd->pub.bus)) {
5135                         DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5136                         tasklet_schedule(&dhd->tasklet);
5137                 }
5138         } else {
5139                 dhd_bus_stop(dhd->pub.bus, TRUE);
5140         }
5141 }
5142
5143 void
5144 dhd_sched_dpc(dhd_pub_t *dhdp)
5145 {
5146         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5147
5148         if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5149                 DHD_OS_WAKE_LOCK(dhdp);
5150                 /* If the semaphore does not get up,
5151                 * wake unlock should be done here
5152                 */
5153                 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5154                         DHD_OS_WAKE_UNLOCK(dhdp);
5155                 }
5156                 return;
5157         } else {
5158                 tasklet_schedule(&dhd->tasklet);
5159         }
5160 }
5161
5162 static void
5163 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5164 {
5165         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5166 #ifdef RXF_DEQUEUE_ON_BUSY
5167         int ret = BCME_OK;
5168         int retry = 2;
5169 #endif /* RXF_DEQUEUE_ON_BUSY */
5170
5171         DHD_OS_WAKE_LOCK(dhdp);
5172
5173         DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5174 #ifdef RXF_DEQUEUE_ON_BUSY
5175         do {
5176                 ret = dhd_rxf_enqueue(dhdp, skb);
5177                 if (ret == BCME_OK || ret == BCME_ERROR)
5178                         break;
5179                 else
5180                         OSL_SLEEP(50); /* waiting for dequeueing */
5181         } while (retry-- > 0);
5182
5183         if (retry <= 0 && ret == BCME_BUSY) {
5184                 void *skbp = skb;
5185
5186                 while (skbp) {
5187                         void *skbnext = PKTNEXT(dhdp->osh, skbp);
5188                         PKTSETNEXT(dhdp->osh, skbp, NULL);
5189                         bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5190                                 __FUNCTION__, __LINE__);
5191                         netif_rx_ni(skbp);
5192                         skbp = skbnext;
5193                 }
5194                 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5195         } else {
5196                 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5197                         up(&dhd->thr_rxf_ctl.sema);
5198                 }
5199         }
5200 #else /* RXF_DEQUEUE_ON_BUSY */
5201         do {
5202                 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5203                         break;
5204         } while (1);
5205         if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5206                 up(&dhd->thr_rxf_ctl.sema);
5207         }
5208         return;
5209 #endif /* RXF_DEQUEUE_ON_BUSY */
5210 }
5211
5212 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5213 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5214
5215 #ifdef TOE
5216 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5217 static int
5218 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5219 {
5220         wl_ioctl_t ioc;
5221         char buf[32];
5222         int ret;
5223
5224         memset(&ioc, 0, sizeof(ioc));
5225
5226         ioc.cmd = WLC_GET_VAR;
5227         ioc.buf = buf;
5228         ioc.len = (uint)sizeof(buf);
5229         ioc.set = FALSE;
5230
5231         strncpy(buf, "toe_ol", sizeof(buf) - 1);
5232         buf[sizeof(buf) - 1] = '\0';
5233         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5234                 /* Check for older dongle image that doesn't support toe_ol */
5235                 if (ret == -EIO) {
5236                         DHD_ERROR(("%s: toe not supported by device\n",
5237                                 dhd_ifname(&dhd->pub, ifidx)));
5238                         return -EOPNOTSUPP;
5239                 }
5240
5241                 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5242                 return ret;
5243         }
5244
5245         memcpy(toe_ol, buf, sizeof(uint32));
5246         return 0;
5247 }
5248
5249 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5250 static int
5251 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5252 {
5253         wl_ioctl_t ioc;
5254         char buf[32];
5255         int toe, ret;
5256
5257         memset(&ioc, 0, sizeof(ioc));
5258
5259         ioc.cmd = WLC_SET_VAR;
5260         ioc.buf = buf;
5261         ioc.len = (uint)sizeof(buf);
5262         ioc.set = TRUE;
5263
5264         /* Set toe_ol as requested */
5265
5266         strncpy(buf, "toe_ol", sizeof(buf) - 1);
5267         buf[sizeof(buf) - 1] = '\0';
5268         memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
5269
5270         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5271                 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5272                         dhd_ifname(&dhd->pub, ifidx), ret));
5273                 return ret;
5274         }
5275
5276         /* Enable toe globally only if any components are enabled. */
5277
5278         toe = (toe_ol != 0);
5279
5280         strcpy(buf, "toe");
5281         memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
5282
5283         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5284                 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5285                 return ret;
5286         }
5287
5288         return 0;
5289 }
5290 #endif /* TOE */
5291
5292 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5293 void dhd_set_scb_probe(dhd_pub_t *dhd)
5294 {
5295         int ret = 0;
5296         wl_scb_probe_t scb_probe;
5297         char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5298
5299         memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
5300
5301         if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5302                 return;
5303         }
5304
5305         bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
5306
5307         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
5308                 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
5309         }
5310
5311         memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5312
5313         scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5314
5315         bcm_mkiovar("scb_probe", (char *)&scb_probe,
5316                 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
5317         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5318                 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5319                 return;
5320         }
5321 }
5322 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5323
5324 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/* ethtool get_drvinfo handler: report driver name "wl" and the numeric
 * driver version from dhd->pub.drv_version.
 */
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	snprintf(info->driver, sizeof(info->driver), "wl");
	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
}

/* ethtool ops table registered on the net_device */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
5337 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5338
5339
5340 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/* Legacy SIOCETHTOOL handler.  uaddr is a user-space pointer to an ethtool
 * request beginning with a u32 command word.  Supports GDRVINFO plus, when
 * TOE is compiled in, get/set of RX/TX checksum offload which is proxied to
 * the dongle's toe_ol iovar.  Returns 0 or a negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		/* NUL-terminated locally on the next line */
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
5450 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5451
/* Decide whether an ioctl error indicates a hung dongle and, if so, classify
 * the hang reason and push a HANG event to user space.  Returns TRUE when a
 * hang message was sent, FALSE otherwise (including when the driver is not
 * up or is unloading).
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* Negative DPC thread pid means the driver is being unloaded */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif 

	/* Hang when the ioctl timed out / remote I/O failed, or when the bus
	 * went down without a deliberate dongle reset.
	 */
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		/* Classify only if no reason was recorded yet; priority:
		 * dongle trap > D3 ack timeout (PCIe) > ioctl response timeout
		 */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
5498
/* Central ioctl dispatcher: routes DHD-private ioctls locally and forwards
 * wl ioctls/iovars to the dongle, starting the bus first if delayed firmware
 * download is allowed.  Serializes key-set/disassoc against pending 802.1x
 * frames.  Always runs dhd_check_hang() on the result.  Returns a BCME_*
 * code.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	/* Clamp the payload to the driver's ioctl buffer limit */
	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		/* Optionally bring the bus up on first use (deferred fw load) */
		if (allow_delay_fwdl) {
			int ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/*  short cut wl ioctl calls here  */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				/* Reset all latency-tracking state */
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_*" iovars are handled by the FD aggregation layer when built */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}

#ifdef DHD_DEBUG
	if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
		if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
			/* Print  IOVAR Information */
			DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
				__FUNCTION__, (char *)data_buf, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				/* Dump the iovar payload that follows the name */
				prhex(NULL, data_buf + strlen(data_buf) + 1,
					buflen - strlen(data_buf) - 1);
			}
		} else {
			/* Print  IOCTL Information */
			DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
				__FUNCTION__, ioc->cmd, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf, buflen);
			}
		}
	}
#endif /* DHD_DEBUG */

	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* Escalate timeout/bus-down errors into a HANG notification */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
5645
/*
 * dhd_ioctl_entry() - net_device ioctl handler (ndo_do_ioctl) for DHD
 * interfaces.
 *
 * Dispatches, in order: wireless-extensions ioctls, SIOCETHTOOL, the
 * Android private-command ioctl (SIOCDEVPRIVATE+1), and the Broadcom
 * driver/firmware ioctl (SIOCDEVPRIVATE).  For the latter it copies the
 * wl_ioctl_t header from userspace (with 32-bit compat translation),
 * copies in at most DHD_IOCTL_MAXLEN bytes of payload, runs
 * dhd_ioctl_process(), and copies the result back on success.
 *
 * Returns 0 or a negative Linux errno; internal BCME_* codes are
 * translated by OSL_ERROR() at the single return point.
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int ifidx;
	int ret;
	void *local_buf = NULL;		/* kernel-side copy of the user payload */
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		ret = BCME_NOTUP;
		goto exit;
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		ret = BCME_DONGLE_DOWN;
		goto exit;
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		goto exit;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		goto exit;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android private commands (string-based, e.g. wpa_supplicant/HAL) */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		goto exit;
	}

	/* Everything beyond this point is the Broadcom wl ioctl */
	if (cmd != SIOCDEVPRIVATE) {
		ret = -EOPNOTSUPP;
		goto exit;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
	/* 32-bit userland on a 64-bit kernel: translate the compat layout */
	if (is_compat_task()) {
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		/* WLC_SPEC_FLAG marks a request laid out as a native
		 * wl_ioctl_t even though the caller is a compat task;
		 * re-read the header in the native format in that case.
		 */
		if (ioc.cmd & WLC_SPEC_FLAG) {
			memset(&ioc, 0, sizeof(ioc));
			/* Copy the ioc control structure part of ioctl request */
			if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
				ret = BCME_BADADDR;
				goto done;
			}
			ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */

			/* To differentiate between wl and dhd read 4 more bytes */
			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
				sizeof(uint)) != 0)) {
				ret = BCME_BADADDR;
				goto done;
			}

		} else { /* ioc.cmd & WLC_SPEC_FLAG */
			/* compat_ptr() widens the 32-bit user pointer */
			ioc.buf = compat_ptr(compat_ioc.buf);
			ioc.len = compat_ioc.len;
			ioc.set = compat_ioc.set;
			ioc.used = compat_ioc.used;
			ioc.needed = compat_ioc.needed;
			/* To differentiate between wl and dhd read 4 more bytes */
			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
				sizeof(uint)) != 0)) {
				ret = BCME_BADADDR;
				goto done;
			}
		} /* ioc.cmd & WLC_SPEC_FLAG */
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}
#ifdef CONFIG_COMPAT
		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/
#endif
		/* To differentiate between wl and dhd read 4 more bytes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	}

	/* NOTE(review): privilege check happens after the header copy-in;
	 * unprivileged callers reach the copy_from_user calls above but
	 * never the payload handling or the firmware.
	 */
	if (!capable(CAP_NET_ADMIN)) {
		ret = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		/* Clamp the user-supplied length; +1 leaves room for the
		 * forced NUL terminator below (iovar names are C strings).
		 */
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			ret = BCME_NOMEM;
			goto done;
		}

		/* Drop the perimeter lock around copy_from_user, which may
		 * fault and sleep; re-taken on both paths.
		 */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			ret = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		*(char *)(local_buf + buflen) = '\0';
	}

	ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	/* Copy results back only on success and only if the caller
	 * provided a buffer.
	 */
	if (!ret && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			ret = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

exit:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(ret);
}
5810
5811
5812 #ifdef FIX_CPU_MIN_CLOCK
5813 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5814 {
5815         if (dhd) {
5816 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5817                 mutex_init(&dhd->cpufreq_fix);
5818 #endif
5819                 dhd->cpufreq_fix_status = FALSE;
5820         }
5821         return 0;
5822 }
5823
5824 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5825 {
5826 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5827         mutex_lock(&dhd->cpufreq_fix);
5828 #endif
5829         if (dhd && !dhd->cpufreq_fix_status) {
5830                 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5831 #ifdef FIX_BUS_MIN_CLOCK
5832                 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5833 #endif /* FIX_BUS_MIN_CLOCK */
5834                 DHD_ERROR(("pm_qos_add_requests called\n"));
5835
5836                 dhd->cpufreq_fix_status = TRUE;
5837         }
5838 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5839         mutex_unlock(&dhd->cpufreq_fix);
5840 #endif
5841 }
5842
5843 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5844 {
5845 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5846         mutex_lock(&dhd ->cpufreq_fix);
5847 #endif
5848         if (dhd && dhd->cpufreq_fix_status != TRUE) {
5849 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5850                 mutex_unlock(&dhd->cpufreq_fix);
5851 #endif
5852                 return;
5853         }
5854
5855         pm_qos_remove_request(&dhd->dhd_cpu_qos);
5856 #ifdef FIX_BUS_MIN_CLOCK
5857         pm_qos_remove_request(&dhd->dhd_bus_qos);
5858 #endif /* FIX_BUS_MIN_CLOCK */
5859         DHD_ERROR(("pm_qos_add_requests called\n"));
5860
5861         dhd->cpufreq_fix_status = FALSE;
5862 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5863         mutex_unlock(&dhd->cpufreq_fix);
5864 #endif
5865 }
5866 #endif /* FIX_CPU_MIN_CLOCK */
5867
5868 #define MAX_TRY_CNT             5 /* Number of tries to disable deepsleep */
5869 int dhd_deepsleep(dhd_info_t *dhd, int flag)
5870 {
5871         char iovbuf[20];
5872         uint powervar = 0;
5873         dhd_pub_t *dhdp;
5874         int cnt = 0;
5875         int ret = 0;
5876
5877         dhdp = &dhd->pub;
5878
5879         switch (flag) {
5880                 case 1 :  /* Deepsleep on */
5881                         DHD_ERROR(("dhd_deepsleep: ON\n"));
5882                         /* give some time to sysioc_work before deepsleep */
5883                         OSL_SLEEP(200);
5884 #ifdef PKT_FILTER_SUPPORT
5885                         /* disable pkt filter */
5886                         dhd_enable_packet_filter(0, dhdp);
5887 #endif /* PKT_FILTER_SUPPORT */
5888                         /* Disable MPC */
5889                         powervar = 0;
5890                         memset(iovbuf, 0, sizeof(iovbuf));
5891                         bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5892                         dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5893
5894                         /* Enable Deepsleep */
5895                         powervar = 1;
5896                         memset(iovbuf, 0, sizeof(iovbuf));
5897                         bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5898                         dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5899                         break;
5900
5901                 case 0: /* Deepsleep Off */
5902                         DHD_ERROR(("dhd_deepsleep: OFF\n"));
5903
5904                         /* Disable Deepsleep */
5905                         for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
5906                                 powervar = 0;
5907                                 memset(iovbuf, 0, sizeof(iovbuf));
5908                                 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5909                                         iovbuf, sizeof(iovbuf));
5910                                 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
5911                                         sizeof(iovbuf), TRUE, 0);
5912
5913                                 memset(iovbuf, 0, sizeof(iovbuf));
5914                                 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5915                                         iovbuf, sizeof(iovbuf));
5916                                 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
5917                                         sizeof(iovbuf), FALSE, 0)) < 0) {
5918                                         DHD_ERROR(("the error of dhd deepsleep status"
5919                                                 " ret value :%d\n", ret));
5920                                 } else {
5921                                         if (!(*(int *)iovbuf)) {
5922                                                 DHD_ERROR(("deepsleep mode is 0,"
5923                                                         " count: %d\n", cnt));
5924                                                 break;
5925                                         }
5926                                 }
5927                         }
5928
5929                         /* Enable MPC */
5930                         powervar = 1;
5931                         memset(iovbuf, 0, sizeof(iovbuf));
5932                         bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5933                         dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5934                         break;
5935         }
5936
5937         return 0;
5938 }
5939
/*
 * dhd_stop() - ndo_stop handler for DHD net devices ("ifconfig ... down").
 *
 * Stops the TX queue, tears down cfg80211 state (including, for dynamic
 * builds, all left-over virtual interfaces and the inet/inet6 address
 * notifiers), stops the protocol module, and finally either powers WiFi
 * off (dynamic builds, primary interface) or enters deepsleep.
 *
 * Always returns 0, as expected for an ndo_stop implementation.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	/* Already down: skip the teardown but still run the exit path
	 * below (wifi-off / deepsleep and state clearing).
	 */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	/* Full cfg80211 teardown only when the primary interface goes down */
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(NULL);

		ifp = dhd->iflist[0];
		ASSERT(ifp && ifp->net);
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev();
#endif /* WL_CFG80211_P2P_DEV_IF */

				dhd_net_if_lock_local(dhd);
				/* index 0 is the primary interface; remove
				 * only the virtual ones (1..DHD_MAX_IFS-1)
				 */
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}

#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
			cancel_work_sync(dhd->dhd_deferred_wq);
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* Tear down the RX NAPI context set up in dhd_open() */
		if (ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB && DHD_LB_RXP */

	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	/* Dynamic-FW builds power the chip off with the primary interface;
	 * otherwise optionally drop into deepsleep.
	 */
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net, TRUE);
	else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

#ifdef BCMDBGFS
	dhd_dbg_remove();
#endif

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	printf("%s: Exit\n", __FUNCTION__);

	return 0;
}
6081
6082 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6083 extern bool g_first_broadcast_scan;
6084 #endif 
6085
6086 #ifdef WL11U
6087 static int dhd_interworking_enable(dhd_pub_t *dhd)
6088 {
6089         char iovbuf[WLC_IOCTL_SMLEN];
6090         uint32 enable = true;
6091         int ret = BCME_OK;
6092
6093         bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
6094         ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6095         if (ret < 0) {
6096                 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
6097         }
6098
6099         if (ret == BCME_OK) {
6100                 /* basic capabilities for HS20 REL2 */
6101                 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
6102                 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
6103                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6104                 if (ret < 0) {
6105                         DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
6106                 }
6107         }
6108
6109         return ret;
6110 }
6111 #endif /* WL11u */
6112
/*
 * dhd_open() - ndo_open handler for DHD net devices ("ifconfig ... up").
 *
 * For the primary interface (ifidx 0) this powers the chip on (dynamic
 * builds), brings up the bus and downloads firmware if needed, applies
 * optional full-dongle aggregation / TOE / NAPI setup, and brings up
 * cfg80211.  Finally enables the TX queue and marks the driver up.
 *
 * Returns 0 on success or a negative value on failure (in which case
 * dhd_stop() is invoked to unwind partial bring-up).
 */
static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
#ifdef BCM_FD_AGGR
	char iovbuf[WLC_IOCTL_SMLEN];
	dbus_config_t config;
	uint32 agglimit = 0;
	uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
#endif /* BCM_FD_AGGR */
	int ifidx;
	int32 ret = 0;

	/* Dynamic builds: refuse open until module init has finished */
	if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
		DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
		return -1;
	}

	printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	/* Serialize against SDIO probe; the warning below only logs if a
	 * concurrent open raced insmod completion.
	 */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */
	/* Init wakelock */
	if (!dhd_download_fw_on_driverload &&
		!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_INIT(dhd);
		dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}

#ifdef PREVENT_REOPEN_DURING_HANG
	/* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
	if (dhd->pub.hang_was_sent == 1) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		/* Force to bring down WLAN interface in case dhd_stop() is not called
		 * from the upper layer when HANG event is triggered.
		 */
		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
			dhd_stop(net);
		} else {
			return -1;
		}
	}
#endif /* PREVENT_REOPEN_DURING_HANG */


	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	/* Reset hang/trap bookkeeping from any previous session */
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;
	dhd->pub.hang_reason = 0;
#ifdef DHD_LOSSLESS_ROAMING
	dhd->pub.dequeue_prec_map = ALLPRIO;
#endif
#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
	 *  This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	/* Primary-interface bring-up: power on, bus start, fw sync */
	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif 
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
#ifdef FIX_CPU_MIN_CLOCK
		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
			dhd_init_cpufreq_fix(dhd);
			dhd_fix_cpu_freq(dhd);
		}
#endif /* FIX_CPU_MIN_CLOCK */

		if (dhd->pub.busstate != DHD_BUS_DATA) {

			/* try to bring up bus */
			/* Perimeter lock dropped: dhd_bus_start may block on
			 * firmware download / ioctl completion.
			 */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}

		}
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

#ifdef BCM_FD_AGGR
		/* Full-dongle aggregation: query the dongle's limit and
		 * configure TX/RX aggregation accordingly.
		 */
		config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;


		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
			iovbuf, sizeof(iovbuf));

		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
			agglimit = *(uint32 *)iovbuf;
			config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
			config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
			DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
				agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
			if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
				DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
			}
		} else {
			DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
			rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
		}

		/* Set aggregation for TX */
		bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
			rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);

		/* Set aggregation for RX */
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
			dhd->pub.info->fdaggr = 0;
			if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
			if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
		} else {
			DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
		}
#endif /* BCM_FD_AGGR */

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		} else {
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
		}
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef DHD_LB
			DHD_LB_STATS_INIT(&dhd->pub);
#ifdef DHD_LB_RXP
			__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#endif /* DHD_LB */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
#if defined(SET_RPS_CPUS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#else
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#endif 
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* Attach NAPI RX processing to this netdev (undone in dhd_stop) */
		if (dhd->rx_napi_netdev == NULL) {
			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
					dhd_napi_poll, dhd_napi_weight);
			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
					__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			napi_enable(&dhd->rx_napi_struct);
			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
			skb_queue_head_init(&dhd->rx_napi_queue);
		}
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(NUM_SCB_MAX_PROBE)
		dhd_set_scb_probe(&dhd->pub);
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

	OLD_MOD_INC_USE_COUNT;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

exit:
	/* On any failure, unwind partial bring-up via the stop path */
	if (ret) {
		dhd_stop(net);
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
6377
6378 int dhd_do_driver_init(struct net_device *net)
6379 {
6380         dhd_info_t *dhd = NULL;
6381
6382         if (!net) {
6383                 DHD_ERROR(("Primary Interface not initialized \n"));
6384                 return -EINVAL;
6385         }
6386
6387 #ifdef MULTIPLE_SUPPLICANT
6388 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6389         if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6390                 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6391                 return 0;
6392         }
6393 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6394 #endif /* MULTIPLE_SUPPLICANT */
6395
6396         /*  && defined(OEM_ANDROID) && defined(BCMSDIO) */
6397         dhd = DHD_DEV_INFO(net);
6398
6399         /* If driver is already initialized, do nothing
6400          */
6401         if (dhd->pub.busstate == DHD_BUS_DATA) {
6402                 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6403                 return 0;
6404         }
6405
6406         if (dhd_open(net) < 0) {
6407                 DHD_ERROR(("Driver Init Failed \n"));
6408                 return -1;
6409         }
6410
6411         return 0;
6412 }
6413
6414 int
6415 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6416 {
6417
6418 #ifdef WL_CFG80211
6419         if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6420                 return BCME_OK;
6421 #endif
6422
6423         /* handle IF event caused by wl commands, SoftAP, WEXT and
6424          * anything else. This has to be done asynchronously otherwise
6425          * DPC will be blocked (and iovars will timeout as DPC has no chance
6426          * to read the response back)
6427          */
6428         if (ifevent->ifidx > 0) {
6429                 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6430                 if (if_event == NULL) {
6431                         DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6432                                 MALLOCED(dhdinfo->pub.osh)));
6433                         return BCME_NOMEM;
6434                 }
6435
6436                 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6437                 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6438                 strncpy(if_event->name, name, IFNAMSIZ);
6439                 if_event->name[IFNAMSIZ - 1] = '\0';
6440                 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6441                         DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6442         }
6443
6444         return BCME_OK;
6445 }
6446
6447 int
6448 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6449 {
6450         dhd_if_event_t *if_event;
6451
6452 #ifdef WL_CFG80211
6453         if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6454                 return BCME_OK;
6455 #endif /* WL_CFG80211 */
6456
6457         /* handle IF event caused by wl commands, SoftAP, WEXT and
6458          * anything else
6459          */
6460         if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6461         if (if_event == NULL) {
6462                 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6463                         MALLOCED(dhdinfo->pub.osh)));
6464                 return BCME_NOMEM;
6465         }
6466         memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6467         memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6468         strncpy(if_event->name, name, IFNAMSIZ);
6469         if_event->name[IFNAMSIZ - 1] = '\0';
6470         dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6471                 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6472
6473         return BCME_OK;
6474 }
6475
6476 /* unregister and free the existing net_device interface (if any) in iflist and
6477  * allocate a new one. the slot is reused. this function does NOT register the
6478  * new interface to linux kernel. dhd_register_if does the job
6479  */
6480 struct net_device*
6481 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6482         uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6483 {
6484         dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6485         dhd_if_t *ifp;
6486
6487         ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6488         ifp = dhdinfo->iflist[ifidx];
6489
6490         if (ifp != NULL) {
6491                 if (ifp->net != NULL) {
6492                         DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6493
6494                         dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6495
6496                         /* in unregister_netdev case, the interface gets freed by net->destructor
6497                          * (which is set to free_netdev)
6498                          */
6499                         if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6500                                 free_netdev(ifp->net);
6501                         } else {
6502                                 netif_stop_queue(ifp->net);
6503                                 if (need_rtnl_lock)
6504                                         unregister_netdev(ifp->net);
6505                                 else
6506                                         unregister_netdevice(ifp->net);
6507                         }
6508                         ifp->net = NULL;
6509                 }
6510         } else {
6511                 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6512                 if (ifp == NULL) {
6513                         DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6514                         return NULL;
6515                 }
6516         }
6517
6518         memset(ifp, 0, sizeof(dhd_if_t));
6519         ifp->info = dhdinfo;
6520         ifp->idx = ifidx;
6521         ifp->bssidx = bssidx;
6522         if (mac != NULL)
6523                 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6524
6525         /* Allocate etherdev, including space for private structure */
6526         ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6527         if (ifp->net == NULL) {
6528                 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6529                 goto fail;
6530         }
6531
6532         /* Setup the dhd interface's netdevice private structure. */
6533         dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6534
6535         if (name && name[0]) {
6536                 strncpy(ifp->net->name, name, IFNAMSIZ);
6537                 ifp->net->name[IFNAMSIZ - 1] = '\0';
6538         }
6539
6540 #ifdef WL_CFG80211
6541         if (ifidx == 0)
6542                 ifp->net->destructor = free_netdev;
6543         else
6544                 ifp->net->destructor = dhd_netdev_free;
6545 #else
6546         ifp->net->destructor = free_netdev;
6547 #endif /* WL_CFG80211 */
6548         strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6549         ifp->name[IFNAMSIZ - 1] = '\0';
6550         dhdinfo->iflist[ifidx] = ifp;
6551
6552 /* initialize the dongle provided if name */
6553         if (dngl_name)
6554                 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6555         else
6556                 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6557
6558 #ifdef PCIE_FULL_DONGLE
6559         /* Initialize STA info list */
6560         INIT_LIST_HEAD(&ifp->sta_list);
6561         DHD_IF_STA_LIST_LOCK_INIT(ifp);
6562 #endif /* PCIE_FULL_DONGLE */
6563
6564 #ifdef DHD_L2_FILTER
6565         ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6566         ifp->parp_allnode = TRUE;
6567 #endif
6568         return ifp->net;
6569
6570 fail:
6571
6572         if (ifp != NULL) {
6573                 if (ifp->net != NULL) {
6574                         dhd_dev_priv_clear(ifp->net);
6575                         free_netdev(ifp->net);
6576                         ifp->net = NULL;
6577                 }
6578                 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6579                 ifp = NULL;
6580         }
6581
6582         dhdinfo->iflist[ifidx] = NULL;
6583         return NULL;
6584 }
6585
/* unregister and free the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL
 */
6589 int
6590 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6591 {
6592         dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6593         dhd_if_t *ifp;
6594
6595         ifp = dhdinfo->iflist[ifidx];
6596
6597         if (ifp != NULL) {
6598                 if (ifp->net != NULL) {
6599                         DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6600
6601                         /* in unregister_netdev case, the interface gets freed by net->destructor
6602                          * (which is set to free_netdev)
6603                          */
6604                         if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6605                                 free_netdev(ifp->net);
6606                         } else {
6607                                 netif_tx_disable(ifp->net);
6608
6609
6610
6611 #if defined(SET_RPS_CPUS)
6612                                 custom_rps_map_clear(ifp->net->_rx);
6613 #endif /* SET_RPS_CPUS */
6614 #if defined(SET_RPS_CPUS)
6615 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6616                                 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6617 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6618 #endif 
6619                                 if (need_rtnl_lock)
6620                                         unregister_netdev(ifp->net);
6621                                 else
6622                                         unregister_netdevice(ifp->net);
6623                         }
6624                         ifp->net = NULL;
6625                         dhdinfo->iflist[ifidx] = NULL;
6626                 }
6627 #ifdef DHD_WMF
6628                 dhd_wmf_cleanup(dhdpub, ifidx);
6629 #endif /* DHD_WMF */
6630 #ifdef DHD_L2_FILTER
6631                 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6632                         NULL, FALSE, dhdpub->tickcnt);
6633                 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6634                 ifp->phnd_arp_table = NULL;
6635 #endif /* DHD_L2_FILTER */
6636
6637                 dhd_if_del_sta_list(ifp);
6638
6639                 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6640
6641         }
6642
6643         return BCME_OK;
6644 }
6645
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (ifidx 0): includes open/stop
 * so ifconfig up/down drives dhd_open()/dhd_stop().
 * NOTE(review): left non-const on purpose — presumably some code paths
 * patch these ops at runtime; confirm before const-qualifying.
 */
static struct net_device_ops dhd_ops_pri = {
        .ndo_open = dhd_open,
        .ndo_stop = dhd_stop,
        .ndo_get_stats = dhd_get_stats,
        .ndo_do_ioctl = dhd_ioctl_entry,
        .ndo_start_xmit = dhd_start_xmit,
        .ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
        /* ndo_set_multicast_list was replaced by ndo_set_rx_mode in 3.2 */
        .ndo_set_rx_mode = dhd_set_multicast_list,
#else
        .ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* net_device_ops for virtual (secondary) interfaces: same handlers as the
 * primary, but no open/stop — lifetime is managed by the driver, not ifconfig.
 */
static struct net_device_ops dhd_ops_virt = {
        .ndo_get_stats = dhd_get_stats,
        .ndo_do_ioctl = dhd_ioctl_entry,
        .ndo_start_xmit = dhd_start_xmit,
        .ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
        .ndo_set_rx_mode = dhd_set_multicast_list,
#else
        .ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6673
#ifdef DEBUGGER
extern void debugger_init(void *bus_handle);
#endif


#ifdef SHOW_LOGTRACE
/* Default host-filesystem paths for the firmware log-decoding inputs;
 * each is overridable via the module parameters declared below.
 */
static char *logstrs_path = "/root/logstrs.bin";
static char *st_str_file_path = "/root/rtecdc.bin";
static char *map_file_path = "/root/rtecdc.map";
static char *rom_st_str_file_path = "/root/roml.bin";
static char *rom_map_file_path = "/root/roml.map";

#define BYTES_AHEAD_NUM         11      /* address in map file is before these many bytes */
#define READ_NUM_BYTES          1000 /* read map file each time this No. of bytes */
#define GO_BACK_FILE_POS_NUM_BYTES      100 /* set file pos back to cur pos */
/* Marker strings searched for in the .map file to locate section addresses */
static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
/* Substrings used to tell RAM vs ROM string files apart by filename */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";
/* Bitmask flags: set as each of the three map addresses is found */
#define RAMSTART_BIT    0x01
#define RDSTART_BIT             0x02
#define RDEND_BIT               0x04
#define ALL_MAP_VAL             (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)

/* Expose the paths as read-only module parameters */
module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);
6704
6705 static void
6706 dhd_init_logstrs_array(dhd_event_log_t *temp)
6707 {
6708         struct file *filep = NULL;
6709         struct kstat stat;
6710         mm_segment_t fs;
6711         char *raw_fmts =  NULL;
6712         int logstrs_size = 0;
6713
6714         logstr_header_t *hdr = NULL;
6715         uint32 *lognums = NULL;
6716         char *logstrs = NULL;
6717         int ram_index = 0;
6718         char **fmts;
6719         int num_fmts = 0;
6720         uint32 i = 0;
6721         int error = 0;
6722
6723         fs = get_fs();
6724         set_fs(KERNEL_DS);
6725
6726         filep = filp_open(logstrs_path, O_RDONLY, 0);
6727
6728         if (IS_ERR(filep)) {
6729                 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
6730                 goto fail;
6731         }
6732         error = vfs_stat(logstrs_path, &stat);
6733         if (error) {
6734                 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
6735                 goto fail;
6736         }
6737         logstrs_size = (int) stat.size;
6738
6739         raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6740         if (raw_fmts == NULL) {
6741                 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
6742                 goto fail;
6743         }
6744         if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) !=   logstrs_size) {
6745                 DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
6746                 goto fail;
6747         }
6748
6749         /* Remember header from the logstrs.bin file */
6750         hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
6751                 sizeof(logstr_header_t));
6752
6753         if (hdr->log_magic == LOGSTRS_MAGIC) {
6754                 /*
6755                 * logstrs.bin start with header.
6756                 */
6757                 num_fmts =      hdr->rom_logstrs_offset / sizeof(uint32);
6758                 ram_index = (hdr->ram_lognums_offset -
6759                         hdr->rom_lognums_offset) / sizeof(uint32);
6760                 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
6761                 logstrs = (char *)       &raw_fmts[hdr->rom_logstrs_offset];
6762         } else {
6763                 /*
6764                  * Legacy logstrs.bin format without header.
6765                  */
6766                 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
6767                 if (num_fmts == 0) {
6768                         /* Legacy ROM/RAM logstrs.bin format:
6769                           *  - ROM 'lognums' section
6770                           *   - RAM 'lognums' section
6771                           *   - ROM 'logstrs' section.
6772                           *   - RAM 'logstrs' section.
6773                           *
6774                           * 'lognums' is an array of indexes for the strings in the
6775                           * 'logstrs' section. The first uint32 is 0 (index of first
6776                           * string in ROM 'logstrs' section).
6777                           *
6778                           * The 4324b5 is the only ROM that uses this legacy format. Use the
6779                           * fixed number of ROM fmtnums to find the start of the RAM
6780                           * 'lognums' section. Use the fixed first ROM string ("Con\n") to
6781                           * find the ROM 'logstrs' section.
6782                           */
6783                         #define NUM_4324B5_ROM_FMTS     186
6784                         #define FIRST_4324B5_ROM_LOGSTR "Con\n"
6785                         ram_index = NUM_4324B5_ROM_FMTS;
6786                         lognums = (uint32 *) raw_fmts;
6787                         num_fmts =      ram_index;
6788                         logstrs = (char *) &raw_fmts[num_fmts << 2];
6789                         while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
6790                                 num_fmts++;
6791                                 logstrs = (char *) &raw_fmts[num_fmts << 2];
6792                         }
6793                 } else {
6794                                 /* Legacy RAM-only logstrs.bin format:
6795                                  *        - RAM 'lognums' section
6796                                  *        - RAM 'logstrs' section.
6797                                  *
6798                                  * 'lognums' is an array of indexes for the strings in the
6799                                  * 'logstrs' section. The first uint32 is an index to the
6800                                  * start of 'logstrs'. Therefore, if this index is divided
6801                                  * by 'sizeof(uint32)' it provides the number of logstr
6802                                  *      entries.
6803                                  */
6804                                 ram_index = 0;
6805                                 lognums = (uint32 *) raw_fmts;
6806                                 logstrs = (char *)      &raw_fmts[num_fmts << 2];
6807                         }
6808         }
6809         fmts = kmalloc(num_fmts  * sizeof(char *), GFP_KERNEL);
6810         if (fmts == NULL) {
6811                 DHD_ERROR(("Failed to allocate fmts memory\n"));
6812                 goto fail;
6813         }
6814
6815         for (i = 0; i < num_fmts; i++) {
6816                 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6817                 * (they are 0-indexed relative to 'rom_logstrs_offset').
6818                 *
6819                 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6820                 * are 0-indexed relative to the start of the logstrs.bin file).
6821                 */
6822                 if (i == ram_index) {
6823                         logstrs = raw_fmts;
6824                 }
6825                 fmts[i] = &logstrs[lognums[i]];
6826         }
6827         temp->fmts = fmts;
6828         temp->raw_fmts = raw_fmts;
6829         temp->num_fmts = num_fmts;
6830         filp_close(filep, NULL);
6831         set_fs(fs);
6832         return;
6833 fail:
6834         if (raw_fmts) {
6835                 kfree(raw_fmts);
6836                 raw_fmts = NULL;
6837         }
6838         if (!IS_ERR(filep))
6839                 filp_close(filep, NULL);
6840         set_fs(fs);
6841         temp->fmts = NULL;
6842         return;
6843 }
6844
/* Scan the firmware .map file 'fname' for the text_start, rodata_start and
 * rodata_end addresses, storing them in the three output parameters.
 *
 * The file is read in READ_NUM_BYTES chunks; after each chunk the file
 * position is rewound GO_BACK_FILE_POS_NUM_BYTES so a marker string split
 * across a chunk boundary is still found on the next read.
 *
 * Returns BCME_OK only when all three addresses were found, BCME_ERROR
 * otherwise (outputs are zeroed on entry).
 */
static int
dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
        uint32 *rodata_end)
{
        struct file *filep = NULL;
        mm_segment_t fs;
        char *raw_fmts =  NULL;
        uint32 read_size = READ_NUM_BYTES;
        int error = 0;
        char * cptr = NULL;
        char c;
        uint8 count = 0;        /* bitmask of RAMSTART_BIT/RDSTART_BIT/RDEND_BIT found so far */

        *ramstart = 0;
        *rodata_start = 0;
        *rodata_end = 0;

        if (fname == NULL) {
                DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
                return BCME_ERROR;
        }

        /* Kernel-space file I/O: widen the address limit for vfs_read() */
        fs = get_fs();
        set_fs(KERNEL_DS);

        filep = filp_open(fname, O_RDONLY, 0);
        if (IS_ERR(filep)) {
                DHD_ERROR(("%s: Failed to open %s \n",  __FUNCTION__, fname));
                goto fail;
        }

        /* Allocate 1 byte more than read_size to terminate it with NULL */
        raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
        if (raw_fmts == NULL) {
                DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
                goto fail;
        }

        /* read ram start, rodata_start and rodata_end values from map  file */

        while (count != ALL_MAP_VAL)
        {
                error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
                if (error < 0) {
                        DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
                                map_file_path, error));
                        goto fail;
                }

                if (error < read_size) {
                        /*
                        * since we reset file pos back to earlier pos by
                        * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
                        * So if ret value is less than read_size, reached EOF don't read further
                        */
                        break;
                }
                /* End raw_fmts with NULL as strstr expects NULL terminated strings */
                raw_fmts[read_size] = '\0';

                /* Get ramstart address: the hex address sits BYTES_AHEAD_NUM
                 * bytes before the marker string on the map-file line.
                 */
                if ((cptr = strstr(raw_fmts, ramstart_str))) {
                        cptr = cptr - BYTES_AHEAD_NUM;
                        sscanf(cptr, "%x %c text_start", ramstart, &c);
                        count |= RAMSTART_BIT;
                }

                /* Get ram rodata start address */
                if ((cptr = strstr(raw_fmts, rodata_start_str))) {
                        cptr = cptr - BYTES_AHEAD_NUM;
                        sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
                        count |= RDSTART_BIT;
                }

                /* Get ram rodata end address */
                if ((cptr = strstr(raw_fmts, rodata_end_str))) {
                        cptr = cptr - BYTES_AHEAD_NUM;
                        sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
                        count |= RDEND_BIT;
                }
                memset(raw_fmts, 0, read_size);
                /*
                * go back to predefined NUM of bytes so that we won't miss
                * the string and addr even if it comes split across two reads.
                */
                filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
        }

        DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
                *ramstart, *rodata_start, *rodata_end));

        DHD_ERROR(("readmap over \n"));

fail:
        if (raw_fmts) {
                kfree(raw_fmts);
                raw_fmts = NULL;
        }
        if (!IS_ERR(filep))
                filp_close(filep, NULL);

        set_fs(fs);
        if (count == ALL_MAP_VAL) {
                return BCME_OK;
        }
        DHD_ERROR(("readmap error 0X%x \n", count));
        return BCME_ERROR;
}
6953
6954 static void
6955 dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
6956 {
6957         struct file *filep = NULL;
6958         mm_segment_t fs;
6959         char *raw_fmts =  NULL;
6960         uint32 logstrs_size = 0;
6961
6962         int error = 0;
6963         uint32 ramstart = 0;
6964         uint32 rodata_start = 0;
6965         uint32 rodata_end = 0;
6966         uint32 logfilebase = 0;
6967
6968         error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
6969         if (error == BCME_ERROR) {
6970                 DHD_ERROR(("readmap Error!! \n"));
6971                 /* don't do event log parsing in actual case */
6972                 temp->raw_sstr = NULL;
6973                 return;
6974         }
6975         DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6976                 ramstart, rodata_start, rodata_end));
6977
6978         fs = get_fs();
6979         set_fs(KERNEL_DS);
6980
6981         filep = filp_open(str_file, O_RDONLY, 0);
6982         if (IS_ERR(filep)) {
6983                 DHD_ERROR(("%s: Failed to open the file %s \n",  __FUNCTION__, str_file));
6984                 goto fail;
6985         }
6986
6987         /* Full file size is huge. Just read required part */
6988         logstrs_size = rodata_end - rodata_start;
6989
6990         raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6991         if (raw_fmts == NULL) {
6992                 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6993                 goto fail;
6994         }
6995
6996         logfilebase = rodata_start - ramstart;
6997
6998         error = generic_file_llseek(filep, logfilebase, SEEK_SET);
6999         if (error < 0) {
7000                 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
7001                 goto fail;
7002         }
7003
7004         error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
7005         if (error != logstrs_size) {
7006                 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
7007                 goto fail;
7008         }
7009
7010         if (strstr(str_file, ram_file_str) != NULL) {
7011                 temp->raw_sstr = raw_fmts;
7012                 temp->ramstart = ramstart;
7013                 temp->rodata_start = rodata_start;
7014                 temp->rodata_end = rodata_end;
7015         } else if (strstr(str_file, rom_file_str) != NULL) {
7016                 temp->rom_raw_sstr = raw_fmts;
7017                 temp->rom_ramstart = ramstart;
7018                 temp->rom_rodata_start = rodata_start;
7019                 temp->rom_rodata_end = rodata_end;
7020         }
7021
7022         filp_close(filep, NULL);
7023         set_fs(fs);
7024
7025         return;
7026 fail:
7027         if (raw_fmts) {
7028                 kfree(raw_fmts);
7029                 raw_fmts = NULL;
7030         }
7031         if (!IS_ERR(filep))
7032                 filp_close(filep, NULL);
7033         set_fs(fs);
7034         if (strstr(str_file, ram_file_str) != NULL) {
7035                 temp->raw_sstr = NULL;
7036         } else if (strstr(str_file, rom_file_str) != NULL) {
7037                 temp->rom_raw_sstr = NULL;
7038         }
7039         return;
7040 }
7041
7042 #endif /* SHOW_LOGTRACE */
7043
7044
7045 dhd_pub_t *
7046 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7047 {
7048         dhd_info_t *dhd = NULL;
7049         struct net_device *net = NULL;
7050         char if_name[IFNAMSIZ] = {'\0'};
7051         uint32 bus_type = -1;
7052         uint32 bus_num = -1;
7053         uint32 slot_num = -1;
7054         wifi_adapter_info_t *adapter = NULL;
7055
7056         dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7057         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7058
7059 #ifdef STBLINUX
7060         DHD_ERROR(("%s\n", driver_target));
7061 #endif /* STBLINUX */
7062         /* will implement get_ids for DBUS later */
7063 #if defined(BCMSDIO)
7064         dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7065 #endif 
7066         adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7067
7068         /* Allocate primary dhd_info */
7069         dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7070         if (dhd == NULL) {
7071                 dhd = MALLOC(osh, sizeof(dhd_info_t));
7072                 if (dhd == NULL) {
7073                         DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7074                         goto fail;
7075                 }
7076         }
7077         memset(dhd, 0, sizeof(dhd_info_t));
7078         dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7079
7080         dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7081
7082         dhd->pub.osh = osh;
7083         dhd->adapter = adapter;
7084
7085 #ifdef GET_CUSTOM_MAC_ENABLE
7086         wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7087 #endif /* GET_CUSTOM_MAC_ENABLE */
7088 #ifdef CUSTOM_FORCE_NODFS_FLAG
7089         dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7090         dhd->pub.force_country_change = TRUE;
7091 #endif /* CUSTOM_FORCE_NODFS_FLAG */
7092 #ifdef CUSTOM_COUNTRY_CODE
7093         get_customized_country_code(dhd->adapter,
7094                 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7095                 dhd->pub.dhd_cflags);
7096 #endif /* CUSTOM_COUNTRY_CODE */
7097         dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7098         dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7099
7100         /* Initialize thread based operation and lock */
7101         sema_init(&dhd->sdsem, 1);
7102
7103         /* Link to info module */
7104         dhd->pub.info = dhd;
7105
7106
7107         /* Link to bus module */
7108         dhd->pub.bus = bus;
7109         dhd->pub.hdrlen = bus_hdrlen;
7110
        /* dhd_conf must be attached after linking dhd to dhd->pub.info,
         * because dhd_detach will check whether .info is NULL or not.
        */
7114         if (dhd_conf_attach(&dhd->pub) != 0) {
7115                 DHD_ERROR(("dhd_conf_attach failed\n"));
7116                 goto fail;
7117         }
7118         dhd_conf_reset(&dhd->pub);
7119         dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7120         dhd_conf_preinit(&dhd->pub);
7121
7122         /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
7123          * This is indeed a hack but we have to make it work properly before we have a better
7124          * solution
7125          */
7126         dhd_update_fw_nv_path(dhd);
7127 #ifndef BUILD_IN_KERNEL
7128         dhd_conf_read_config(&dhd->pub, dhd->conf_path);
7129 #endif
7130
7131         /* Set network interface name if it was provided as module parameter */
7132         if (iface_name[0]) {
7133                 int len;
7134                 char ch;
7135                 strncpy(if_name, iface_name, IFNAMSIZ);
7136                 if_name[IFNAMSIZ - 1] = 0;
7137                 len = strlen(if_name);
7138                 ch = if_name[len - 1];
7139                 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7140                         strcat(if_name, "%d");
7141         }
7142
7143         /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
7144         net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7145         if (net == NULL) {
7146                 goto fail;
7147         }
7148
7149
7150         dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7151 #ifdef DHD_L2_FILTER
7152         /* initialize the l2_filter_cnt */
7153         dhd->pub.l2_filter_cnt = 0;
7154 #endif
7155 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7156         net->open = NULL;
7157 #else
7158         net->netdev_ops = NULL;
7159 #endif
7160
7161         mutex_init(&dhd->dhd_iovar_mutex);
7162         sema_init(&dhd->proto_sem, 1);
7163
7164 #ifdef PROP_TXSTATUS
7165         spin_lock_init(&dhd->wlfc_spinlock);
7166
7167         dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7168         dhd->pub.plat_init = dhd_wlfc_plat_init;
7169         dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7170
7171 #ifdef DHD_WLFC_THREAD
7172         init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7173         dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7174         if (IS_ERR(dhd->pub.wlfc_thread)) {
7175                 DHD_ERROR(("create wlfc thread failed\n"));
7176                 goto fail;
7177         } else {
7178                 wake_up_process(dhd->pub.wlfc_thread);
7179         }
7180 #endif /* DHD_WLFC_THREAD */
7181 #endif /* PROP_TXSTATUS */
7182
7183         /* Initialize other structure content */
7184         init_waitqueue_head(&dhd->ioctl_resp_wait);
7185         init_waitqueue_head(&dhd->d3ack_wait);
7186         init_waitqueue_head(&dhd->ctrl_wait);
7187         init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7188         dhd->pub.dhd_bus_busy_state = 0;
7189
7190         /* Initialize the spinlocks */
7191         spin_lock_init(&dhd->sdlock);
7192         spin_lock_init(&dhd->txqlock);
7193         spin_lock_init(&dhd->dhd_lock);
7194         spin_lock_init(&dhd->rxf_lock);
7195 #if defined(RXFRAME_THREAD)
7196         dhd->rxthread_enabled = TRUE;
7197 #endif /* defined(RXFRAME_THREAD) */
7198
7199 #ifdef DHDTCPACK_SUPPRESS
7200         spin_lock_init(&dhd->tcpack_lock);
7201 #endif /* DHDTCPACK_SUPPRESS */
7202
7203         /* Initialize Wakelock stuff */
7204         spin_lock_init(&dhd->wakelock_spinlock);
7205         spin_lock_init(&dhd->wakelock_evt_spinlock);
7206         DHD_OS_WAKE_LOCK_INIT(dhd);
7207         dhd->wakelock_wd_counter = 0;
7208 #ifdef CONFIG_HAS_WAKELOCK
7209         wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7210 #endif /* CONFIG_HAS_WAKELOCK */
7211
7212 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7213         mutex_init(&dhd->dhd_net_if_mutex);
7214         mutex_init(&dhd->dhd_suspend_mutex);
7215 #endif
7216         dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7217
7218         /* Attach and link in the protocol */
7219         if (dhd_prot_attach(&dhd->pub) != 0) {
7220                 DHD_ERROR(("dhd_prot_attach failed\n"));
7221                 goto fail;
7222         }
7223         dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7224
7225 #ifdef WL_CFG80211
7226         /* Attach and link in the cfg80211 */
7227         if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7228                 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7229                 goto fail;
7230         }
7231
7232         dhd_monitor_init(&dhd->pub);
7233         dhd_state |= DHD_ATTACH_STATE_CFG80211;
7234 #endif
7235 #ifdef DHD_LOG_DUMP
7236         dhd_log_dump_init(&dhd->pub);
7237 #endif /* DHD_LOG_DUMP */
7238 #if defined(WL_WIRELESS_EXT)
7239         /* Attach and link in the iw */
7240         if (!(dhd_state &  DHD_ATTACH_STATE_CFG80211)) {
7241                 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7242                         DHD_ERROR(("wl_iw_attach failed\n"));
7243                         goto fail;
7244                 }
7245                 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7246         }
7247 #ifdef WL_ESCAN
7248         wl_escan_attach(net, (void *)&dhd->pub);
7249 #endif
7250 #endif /* defined(WL_WIRELESS_EXT) */
7251
7252 #ifdef SHOW_LOGTRACE
7253         dhd_init_logstrs_array(&dhd->event_data);
7254         dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7255         dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7256 #endif /* SHOW_LOGTRACE */
7257
7258         if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7259                 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7260                 goto fail;
7261         }
7262
7263
7264
7265         /* Set up the watchdog timer */
7266         init_timer(&dhd->timer);
7267         dhd->timer.data = (ulong)dhd;
7268         dhd->timer.function = dhd_watchdog;
7269         dhd->default_wd_interval = dhd_watchdog_ms;
7270
7271         if (dhd_watchdog_prio >= 0) {
7272                 /* Initialize watchdog thread */
7273                 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7274                 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7275                         goto fail;
7276                 }
7277
7278         } else {
7279                 dhd->thr_wdt_ctl.thr_pid = -1;
7280         }
7281
7282 #ifdef DHD_PCIE_RUNTIMEPM
7283         /* Setup up the runtime PM Idlecount timer */
7284         init_timer(&dhd->rpm_timer);
7285         dhd->rpm_timer.data = (ulong)dhd;
7286         dhd->rpm_timer.function = dhd_runtimepm;
7287         dhd->rpm_timer_valid = FALSE;
7288
7289         dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7290         PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7291         if (dhd->thr_rpm_ctl.thr_pid < 0) {
7292                 goto fail;
7293         }
7294 #endif /* DHD_PCIE_RUNTIMEPM */
7295
7296 #ifdef DEBUGGER
7297         debugger_init((void *) bus);
7298 #endif
7299
7300         /* Set up the bottom half handler */
7301         if (dhd_dpc_prio >= 0) {
7302                 /* Initialize DPC thread */
7303                 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7304                 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7305                         goto fail;
7306                 }
7307         } else {
7308                 /*  use tasklet for dpc */
7309                 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7310                 dhd->thr_dpc_ctl.thr_pid = -1;
7311         }
7312
7313         if (dhd->rxthread_enabled) {
7314                 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7315                 /* Initialize RXF thread */
7316                 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7317                 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7318                         goto fail;
7319                 }
7320         }
7321
7322         dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7323
7324 #if defined(CONFIG_PM_SLEEP)
7325         if (!dhd_pm_notifier_registered) {
7326                 dhd_pm_notifier_registered = TRUE;
7327                 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7328                 dhd->pm_notifier.priority = 10;
7329                 register_pm_notifier(&dhd->pm_notifier);
7330         }
7331
7332 #endif /* CONFIG_PM_SLEEP */
7333
7334 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7335         dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7336         dhd->early_suspend.suspend = dhd_early_suspend;
7337         dhd->early_suspend.resume = dhd_late_resume;
7338         register_early_suspend(&dhd->early_suspend);
7339         dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7340 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7341
7342 #ifdef ARP_OFFLOAD_SUPPORT
7343         dhd->pend_ipaddr = 0;
7344         if (!dhd_inetaddr_notifier_registered) {
7345                 dhd_inetaddr_notifier_registered = TRUE;
7346                 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7347         }
7348 #endif /* ARP_OFFLOAD_SUPPORT */
7349
7350 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7351         if (!dhd_inet6addr_notifier_registered) {
7352                 dhd_inet6addr_notifier_registered = TRUE;
7353                 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7354         }
7355 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7356         dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7357 #ifdef DEBUG_CPU_FREQ
7358         dhd->new_freq = alloc_percpu(int);
7359         dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7360         cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7361 #endif
7362 #ifdef DHDTCPACK_SUPPRESS
7363 #ifdef BCMSDIO
7364         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7365 #elif defined(BCMPCIE)
7366         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7367 #else
7368         dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7369 #endif /* BCMSDIO */
7370 #endif /* DHDTCPACK_SUPPRESS */
7371
7372 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7373 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7374
7375         dhd_state |= DHD_ATTACH_STATE_DONE;
7376         dhd->dhd_state = dhd_state;
7377
7378         dhd_found++;
7379 #ifdef DHD_DEBUG_PAGEALLOC
7380         register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7381 #endif /* DHD_DEBUG_PAGEALLOC */
7382
7383 #if defined(DHD_LB)
7384         DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7385
7386         dhd_lb_set_default_cpus(dhd);
7387
7388         /* Initialize the CPU Masks */
7389         if (dhd_cpumasks_init(dhd) ==  0) {
7390
7391                 /* Now we have the current CPU maps, run through candidacy */
7392                 dhd_select_cpu_candidacy(dhd);
7393
7394                 /*
7395                  * If we are able to initialize CPU masks, lets register to the
7396                  * CPU Hotplug framework to change the CPU for each job dynamically
7397                  * using candidacy algorithm.
7398                  */
7399                 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7400                 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7401         } else {
7402                 /*
7403                  * We are unable to initialize CPU masks, so candidacy algorithm
7404                  * won't run, but still Load Balancing will be honoured based
7405                  * on the CPUs allocated for a given job statically during init
7406                  */
7407                 dhd->cpu_notifier.notifier_call = NULL;
7408                 DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n",
7409                         __FUNCTION__));
7410         }
7411
7412
7413         DHD_LB_STATS_INIT(&dhd->pub);
7414
7415         /* Initialize the Load Balancing Tasklets and Napi object */
7416 #if defined(DHD_LB_TXC)
7417         tasklet_init(&dhd->tx_compl_tasklet,
7418                 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7419         INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7420         DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7421 #endif /* DHD_LB_TXC */
7422
7423 #if defined(DHD_LB_RXC)
7424         tasklet_init(&dhd->rx_compl_tasklet,
7425                 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7426         INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7427         DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7428 #endif /* DHD_LB_RXC */
7429
7430 #if defined(DHD_LB_RXP)
7431          __skb_queue_head_init(&dhd->rx_pend_queue);
7432         skb_queue_head_init(&dhd->rx_napi_queue);
7433
7434         /* Initialize the work that dispatches NAPI job to a given core */
7435         INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7436         DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7437 #endif /* DHD_LB_RXP */
7438
7439 #endif /* DHD_LB */
7440
7441         INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7442
7443         (void)dhd_sysfs_init(dhd);
7444
7445         return &dhd->pub;
7446
7447 fail:
7448         if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7449                 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7450                         __FUNCTION__, dhd_state, &dhd->pub));
7451                 dhd->dhd_state = dhd_state;
7452                 dhd_detach(&dhd->pub);
7453                 dhd_free(&dhd->pub);
7454         }
7455
7456         return NULL;
7457 }
7458
7459 #include <linux/delay.h>
7460
7461 void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7462 {
7463         dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7464
7465         schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7466 }
7467
7468 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7469 {
7470         if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7471                 return DHD_FLAG_HOSTAP_MODE;
7472         if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7473                 return DHD_FLAG_P2P_MODE;
7474         if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7475                 return DHD_FLAG_IBSS_MODE;
7476         if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7477                 return DHD_FLAG_MFG_MODE;
7478
7479         return DHD_FLAG_STA_MODE;
7480 }
7481
7482 extern int rkwifi_set_firmware(char *fw, char *nvram);
/*
 * Resolve the firmware, nvram and config file paths into dhdinfo->fw_path,
 * dhdinfo->nv_path and dhdinfo->conf_path.
 *
 * Precedence, lowest to highest:
 *   1. Rockchip platform defaults (rkwifi_set_firmware()) or the
 *      CONFIG_BCMDHD_FW_PATH / CONFIG_BCMDHD_NVRAM_PATH build-time paths;
 *   2. adapter info paths (only considered while the dhdinfo path is
 *      still empty, i.e. first initialization);
 *   3. the firmware_path / nvram_path / config_path module parameters,
 *      whenever they are non-empty.
 *
 * Returns TRUE on success, FALSE when a path is too long or (without
 * BCMEMBEDIMAGE) when the firmware or nvram path could not be resolved.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
        int fw_len;
        int nv_len;
        int conf_len;
        const char *fw = NULL;
        const char *nv = NULL;
        const char *conf = NULL;
        /* Scratch buffers filled in by rkwifi_set_firmware() below */
        char firmware[100] = {0};
        char nvram[100] = {0};
        wifi_adapter_info_t *adapter = dhdinfo->adapter;


        /* Update firmware and nvram path. The path may be from adapter info or module parameter
         * The path from adapter info is used for initialization only (as it won't change).
         *
         * The firmware_path/nvram_path module parameter may be changed by the system at run
         * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
         * command may change dhdinfo->fw_path. As such we need to clear the path info in
         * module parameter after it is copied. We won't update the path until the module parameter
         * is changed again (first character is not '\0')
         */

        /* set default firmware and nvram path for built-in type driver */
//      if (!dhd_download_fw_on_driverload) {
                rkwifi_set_firmware(firmware, nvram);
#ifdef CONFIG_BCMDHD_FW_PATH
                fw = CONFIG_BCMDHD_FW_PATH;
#else
                fw = firmware;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
                nv = CONFIG_BCMDHD_NVRAM_PATH;
#else
                nv = nvram;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//      }

        /* check if we need to initialize the path */
        /* Adapter-provided paths are only used while dhdinfo's copy is empty,
         * i.e. on the first call; later calls keep whatever was stored.
         */
        if (dhdinfo->fw_path[0] == '\0') {
                if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
                        fw = adapter->fw_path;

        }
        if (dhdinfo->nv_path[0] == '\0') {
                if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
                        nv = adapter->nv_path;
        }
        if (dhdinfo->conf_path[0] == '\0') {
                if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
                        conf = adapter->conf_path;
        }

        /* Use module parameter if it is valid, EVEN IF the path has not been initialized
         *
         * TODO: need a solution for multi-chip, can't use the same firmware for all chips
         */
        if (firmware_path[0] != '\0')
                fw = firmware_path;
        if (nvram_path[0] != '\0')
                nv = nvram_path;
        if (config_path[0] != '\0')
                conf = config_path;

        /* Copy the selected paths; reject anything that would not fit
         * (the length check before strncpy guarantees NUL termination).
         */
        if (fw && fw[0] != '\0') {
                fw_len = strlen(fw);
                if (fw_len >= sizeof(dhdinfo->fw_path)) {
                        DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
                        return FALSE;
                }
                strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
                /* strip a trailing newline (e.g. from echo into the module param) */
                if (dhdinfo->fw_path[fw_len-1] == '\n')
                       dhdinfo->fw_path[fw_len-1] = '\0';
        }
        if (nv && nv[0] != '\0') {
                nv_len = strlen(nv);
                if (nv_len >= sizeof(dhdinfo->nv_path)) {
                        DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
                        return FALSE;
                }
                strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
                if (dhdinfo->nv_path[nv_len-1] == '\n')
                       dhdinfo->nv_path[nv_len-1] = '\0';
        }
        if (conf && conf[0] != '\0') {
                conf_len = strlen(conf);
                if (conf_len >= sizeof(dhdinfo->conf_path)) {
                        DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
                        return FALSE;
                }
                strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
                if (dhdinfo->conf_path[conf_len-1] == '\n')
                       dhdinfo->conf_path[conf_len-1] = '\0';
        }

#if 0
        /* clear the path in module parameter */
        if (dhd_download_fw_on_driverload) {
                firmware_path[0] = '\0';
                nvram_path[0] = '\0';
                config_path[0] = '\0';
        }
#endif

#ifndef BCMEMBEDIMAGE
        /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
        if (dhdinfo->fw_path[0] == '\0') {
                DHD_ERROR(("firmware path not found\n"));
                return FALSE;
        }
        if (dhdinfo->nv_path[0] == '\0') {
                DHD_ERROR(("nvram path not found\n"));
                return FALSE;
        }
        /* no explicit config path: derive it from the nvram path */
        if (dhdinfo->conf_path[0] == '\0') {
                dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
        }
#ifdef CONFIG_PATH_AUTO_SELECT
        dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
#endif
#endif /* BCMEMBEDIMAGE */

        return TRUE;
}
7607
7608 #ifdef CUSTOMER_HW4_DEBUG
/*
 * Sanity-check that the chip id reported by the bus matches the chip the
 * driver was built for (the BCMxxxx_CHIP compile-time definition).
 * Returns TRUE on a match (including the accepted 4355-as-4359 pairing),
 * FALSE on mismatch or when no chip definition was built in.
 */
bool dhd_validate_chipid(dhd_pub_t *dhdp)
{
        uint chipid = dhd_bus_chip_id(dhdp);
        uint config_chipid;

#ifdef BCM4359_CHIP
        config_chipid = BCM4359_CHIP_ID;
#elif defined(BCM4358_CHIP)
        config_chipid = BCM4358_CHIP_ID;
#elif defined(BCM4354_CHIP)
        config_chipid = BCM4354_CHIP_ID;
#elif defined(BCM4356_CHIP)
        config_chipid = BCM4356_CHIP_ID;
#elif defined(BCM4339_CHIP)
        config_chipid = BCM4339_CHIP_ID;
#elif defined(BCM43349_CHIP)
        config_chipid = BCM43349_CHIP_ID;
#elif defined(BCM4335_CHIP)
        config_chipid = BCM4335_CHIP_ID;
#elif defined(BCM43241_CHIP)
        /* 43241 builds use the 4324 chip id */
        config_chipid = BCM4324_CHIP_ID;
#elif defined(BCM4330_CHIP)
        config_chipid = BCM4330_CHIP_ID;
#elif defined(BCM43430_CHIP)
        config_chipid = BCM43430_CHIP_ID;
#elif defined(BCM4334W_CHIP)
        /* 4334W builds use the 43342 chip id */
        config_chipid = BCM43342_CHIP_ID;
#elif defined(BCM43455_CHIP)
        /* 43455 builds use the 4345 chip id */
        config_chipid = BCM4345_CHIP_ID;
#else
        DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
                " please add CONFIG_BCMXXXX into the Kernel and"
                " BCMXXXX_CHIP definition into the DHD driver\n",
                __FUNCTION__));
        config_chipid = 0;

        return FALSE;
#endif /* BCM4359_CHIP */

#if defined(BCM4359_CHIP)
        /* A 4355 dongle is accepted by a 4359-configured driver */
        if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
                return TRUE;
        }
#endif /* BCM4359_CHIP */

        return config_chipid == chipid;
}
7656 #endif /* CUSTOMER_HW4_DEBUG */
7657
7658 int
7659 dhd_bus_start(dhd_pub_t *dhdp)
7660 {
7661         int ret = -1;
7662         dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7663         unsigned long flags;
7664
7665         ASSERT(dhd);
7666
7667         DHD_TRACE(("Enter %s:\n", __FUNCTION__));
7668
7669         DHD_PERIM_LOCK(dhdp);
7670
7671         /* try to download image and nvram to the dongle */
7672         if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
7673                 /* Indicate FW Download has not yet done */
7674                 dhd->pub.is_fw_download_done = FALSE;
7675                 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
7676                         __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
7677                 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
7678                         dhd->fw_path, dhd->nv_path, dhd->conf_path);
7679                 if (ret < 0) {
7680                         DHD_ERROR(("%s: failed to download firmware %s\n",
7681                                 __FUNCTION__, dhd->fw_path));
7682                         DHD_PERIM_UNLOCK(dhdp);
7683                         return ret;
7684                 }
7685                 /* Indicate FW Download has succeeded */
7686                 dhd->pub.is_fw_download_done = TRUE;
7687         }
7688         if (dhd->pub.busstate != DHD_BUS_LOAD) {
7689                 DHD_PERIM_UNLOCK(dhdp);
7690                 return -ENETDOWN;
7691         }
7692
7693         dhd_os_sdlock(dhdp);
7694
7695         /* Start the watchdog timer */
7696         dhd->pub.tickcnt = 0;
7697         dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
7698         DHD_ENABLE_RUNTIME_PM(&dhd->pub);
7699
7700         /* Bring up the bus */
7701         if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
7702
7703                 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
7704                 dhd_os_sdunlock(dhdp);
7705                 DHD_PERIM_UNLOCK(dhdp);
7706                 return ret;
7707         }
7708 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7709 #if defined(BCMPCIE_OOB_HOST_WAKE)
7710         dhd_os_sdunlock(dhdp);
7711 #endif /* BCMPCIE_OOB_HOST_WAKE */
7712         /* Host registration for OOB interrupt */
7713         if (dhd_bus_oob_intr_register(dhdp)) {
7714                 /* deactivate timer and wait for the handler to finish */
7715 #if !defined(BCMPCIE_OOB_HOST_WAKE)
7716                 DHD_GENERAL_LOCK(&dhd->pub, flags);
7717                 dhd->wd_timer_valid = FALSE;
7718                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7719                 del_timer_sync(&dhd->timer);
7720
7721                 dhd_os_sdunlock(dhdp);
7722 #endif /* !BCMPCIE_OOB_HOST_WAKE */
7723                 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7724                 DHD_PERIM_UNLOCK(dhdp);
7725                 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7726                 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
7727                 return -ENODEV;
7728         }
7729
7730 #if defined(BCMPCIE_OOB_HOST_WAKE)
7731         dhd_os_sdlock(dhdp);
7732         dhd_bus_oob_intr_set(dhdp, TRUE);
7733 #else
7734         /* Enable oob at firmware */
7735         dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7736 #endif /* BCMPCIE_OOB_HOST_WAKE */
7737 #elif defined(FORCE_WOWLAN)
7738         /* Enable oob at firmware */
7739         dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7740 #endif 
7741 #ifdef PCIE_FULL_DONGLE
7742         {
7743                 /* max_h2d_rings includes H2D common rings */
7744                 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
7745
7746                 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
7747                         max_h2d_rings));
7748                 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
7749                         dhd_os_sdunlock(dhdp);
7750                         DHD_PERIM_UNLOCK(dhdp);
7751                         return ret;
7752                 }
7753         }
7754 #endif /* PCIE_FULL_DONGLE */
7755
7756         /* Do protocol initialization necessary for IOCTL/IOVAR */
7757 #ifdef PCIE_FULL_DONGLE
7758         dhd_os_sdunlock(dhdp);
7759 #endif /* PCIE_FULL_DONGLE */
7760         ret = dhd_prot_init(&dhd->pub);
7761         if (unlikely(ret) != BCME_OK) {
7762                 DHD_PERIM_UNLOCK(dhdp);
7763                 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7764                 return ret;
7765         }
7766 #ifdef PCIE_FULL_DONGLE
7767         dhd_os_sdlock(dhdp);
7768 #endif /* PCIE_FULL_DONGLE */
7769
7770         /* If bus is not ready, can't come up */
7771         if (dhd->pub.busstate != DHD_BUS_DATA) {
7772                 DHD_GENERAL_LOCK(&dhd->pub, flags);
7773                 dhd->wd_timer_valid = FALSE;
7774                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7775                 del_timer_sync(&dhd->timer);
7776                 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
7777                 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7778                 dhd_os_sdunlock(dhdp);
7779                 DHD_PERIM_UNLOCK(dhdp);
7780                 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7781                 return -ENODEV;
7782         }
7783
7784         dhd_os_sdunlock(dhdp);
7785
7786         /* Bus is ready, query any dongle information */
7787         if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7788                 DHD_GENERAL_LOCK(&dhd->pub, flags);
7789                 dhd->wd_timer_valid = FALSE;
7790                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7791                 del_timer_sync(&dhd->timer);
7792                 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
7793                 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7794                 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7795                 DHD_PERIM_UNLOCK(dhdp);
7796                 return ret;
7797         }
7798
7799 #ifdef ARP_OFFLOAD_SUPPORT
7800         if (dhd->pend_ipaddr) {
7801 #ifdef AOE_IP_ALIAS_SUPPORT
7802                 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
7803 #endif /* AOE_IP_ALIAS_SUPPORT */
7804                 dhd->pend_ipaddr = 0;
7805         }
7806 #endif /* ARP_OFFLOAD_SUPPORT */
7807
7808         DHD_PERIM_UNLOCK(dhdp);
7809         return 0;
7810 }
7811
7812 #ifdef WLTDLS
7813 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
7814 {
7815         char iovbuf[WLC_IOCTL_SMLEN];
7816         uint32 tdls = tdls_on;
7817         int ret = 0;
7818         uint32 tdls_auto_op = 0;
7819         uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
7820         int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
7821         int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
7822         BCM_REFERENCE(mac);
7823         if (!FW_SUPPORTED(dhd, tdls))
7824                 return BCME_ERROR;
7825
7826         if (dhd->tdls_enable == tdls_on)
7827                 goto auto_mode;
7828         bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
7829         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7830                 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
7831                 goto exit;
7832         }
7833         dhd->tdls_enable = tdls_on;
7834 auto_mode:
7835
7836         tdls_auto_op = auto_on;
7837         bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
7838                 iovbuf, sizeof(iovbuf));
7839         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7840                 sizeof(iovbuf), TRUE, 0)) < 0) {
7841                 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
7842                 goto exit;
7843         }
7844
7845         if (tdls_auto_op) {
7846                 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
7847                         sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
7848                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7849                         sizeof(iovbuf), TRUE, 0)) < 0) {
7850                         DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
7851                         goto exit;
7852                 }
7853                 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
7854                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7855                         sizeof(iovbuf), TRUE, 0)) < 0) {
7856                         DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
7857                         goto exit;
7858                 }
7859                 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
7860                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7861                         sizeof(iovbuf), TRUE, 0)) < 0) {
7862                         DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
7863                         goto exit;
7864                 }
7865         }
7866
7867 exit:
7868         return ret;
7869 }
7870
7871 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7872 {
7873         dhd_info_t *dhd = DHD_DEV_INFO(dev);
7874         int ret = 0;
7875         if (dhd)
7876                 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7877         else
7878                 ret = BCME_ERROR;
7879         return ret;
7880 }
7881 int
7882 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7883 {
7884         char iovbuf[WLC_IOCTL_SMLEN];
7885         int ret = 0;
7886         bool auto_on = false;
7887         uint32 mode =  wfd_mode;
7888
7889 #ifdef ENABLE_TDLS_AUTO_MODE
7890         if (wfd_mode) {
7891                 auto_on = false;
7892         } else {
7893                 auto_on = true;
7894         }
7895 #else
7896         auto_on = false;
7897 #endif /* ENABLE_TDLS_AUTO_MODE */
7898         ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7899         if (ret < 0) {
7900                 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7901                 return ret;
7902         }
7903
7904
7905         bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7906                         iovbuf, sizeof(iovbuf));
7907         if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7908                         sizeof(iovbuf), TRUE, 0)) < 0) &&
7909                         (ret != BCME_UNSUPPORTED)) {
7910                 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
7911                 return ret;
7912         }
7913
7914         ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7915         if (ret < 0) {
7916                 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7917                 return ret;
7918         }
7919
7920         dhd->tdls_mode = mode;
7921         return ret;
7922 }
7923 #ifdef PCIE_FULL_DONGLE
7924 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
7925 {
7926         dhd_info_t *dhd = DHD_DEV_INFO(dev);
7927         dhd_pub_t *dhdp =  (dhd_pub_t *)&dhd->pub;
7928         tdls_peer_node_t *cur = dhdp->peer_tbl.node;
7929         tdls_peer_node_t *new = NULL, *prev = NULL;
7930         dhd_if_t *dhdif;
7931         uint8 sa[ETHER_ADDR_LEN];
7932         int ifidx = dhd_net2idx(dhd, dev);
7933
7934         if (ifidx == DHD_BAD_IF)
7935                 return;
7936
7937         dhdif = dhd->iflist[ifidx];
7938         memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
7939
7940         if (connect) {
7941                 while (cur != NULL) {
7942                         if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
7943                                 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
7944                                         __FUNCTION__, __LINE__));
7945                                 return;
7946                         }
7947                         cur = cur->next;
7948                 }
7949
7950                 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
7951                 if (new == NULL) {
7952                         DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
7953                         return;
7954                 }
7955                 memcpy(new->addr, da, ETHER_ADDR_LEN);
7956                 new->next = dhdp->peer_tbl.node;
7957                 dhdp->peer_tbl.node = new;
7958                 dhdp->peer_tbl.tdls_peer_count++;
7959
7960         } else {
7961                 while (cur != NULL) {
7962                         if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
7963                                 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
7964                                 if (prev)
7965                                         prev->next = cur->next;
7966                                 else
7967                                         dhdp->peer_tbl.node = cur->next;
7968                                 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
7969                                 dhdp->peer_tbl.tdls_peer_count--;
7970                                 return;
7971                         }
7972                         prev = cur;
7973                         cur = cur->next;
7974                 }
7975                 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
7976         }
7977 }
7978 #endif /* PCIE_FULL_DONGLE */
7979 #endif 
7980
7981 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
7982 {
7983         if (!dhd)
7984                 return FALSE;
7985
7986         if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
7987                 return TRUE;
7988         else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
7989                 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
7990                 return TRUE;
7991         else
7992                 return FALSE;
7993 }
7994 #if !defined(AP) && defined(WLP2P)
7995 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
7996  * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
7997  * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
7998  * would still be named as fw_bcmdhd_apsta.
7999  */
8000 uint32
8001 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
8002 {
8003         int32 ret = 0;
8004         char buf[WLC_IOCTL_SMLEN];
8005         bool mchan_supported = FALSE;
8006         /* if dhd->op_mode is already set for HOSTAP and Manufacturing
8007          * test mode, that means we only will use the mode as it is
8008          */
8009         if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
8010                 return 0;
8011         if (FW_SUPPORTED(dhd, vsdb)) {
8012                 mchan_supported = TRUE;
8013         }
8014         if (!FW_SUPPORTED(dhd, p2p)) {
8015                 DHD_TRACE(("Chip does not support p2p\n"));
8016                 return 0;
8017         } else {
8018                 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
8019                 memset(buf, 0, sizeof(buf));
8020                 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
8021                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8022                         FALSE, 0)) < 0) {
8023                         DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
8024                         return 0;
8025                 } else {
8026                         if (buf[0] == 1) {
8027                                 /* By default, chip supports single chan concurrency,
8028                                 * now lets check for mchan
8029                                 */
8030                                 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
8031                                 if (mchan_supported)
8032                                         ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
8033                                 if (FW_SUPPORTED(dhd, rsdb)) {
8034                                         ret |= DHD_FLAG_RSDB_MODE;
8035                                 }
8036                                 if (FW_SUPPORTED(dhd, mp2p)) {
8037                                         ret |= DHD_FLAG_MP2P_MODE;
8038                                 }
8039 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
8040                                 return ret;
8041 #else
8042                                 return 0;
8043 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
8044                         }
8045                 }
8046         }
8047         return 0;
8048 }
8049 #endif 
8050
8051 #ifdef SUPPORT_AP_POWERSAVE
8052 #define RXCHAIN_PWRSAVE_PPS                     10
8053 #define RXCHAIN_PWRSAVE_QUIET_TIME              10
8054 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK        0
8055 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8056 {
8057         char iovbuf[128];
8058         int32 pps = RXCHAIN_PWRSAVE_PPS;
8059         int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8060         int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8061
8062         if (enable) {
8063                 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8064                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8065                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8066                         DHD_ERROR(("Failed to enable AP power save\n"));
8067                 }
8068                 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8069                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8070                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8071                         DHD_ERROR(("Failed to set pps\n"));
8072                 }
8073                 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8074                 4, iovbuf, sizeof(iovbuf));
8075                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8076                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8077                         DHD_ERROR(("Failed to set quiet time\n"));
8078                 }
8079                 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8080                 4, iovbuf, sizeof(iovbuf));
8081                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8082                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8083                         DHD_ERROR(("Failed to set stas assoc check\n"));
8084                 }
8085         } else {
8086                 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8087                 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8088                     iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8089                         DHD_ERROR(("Failed to disable AP power save\n"));
8090                 }
8091         }
8092
8093         return 0;
8094 }
8095 #endif /* SUPPORT_AP_POWERSAVE */
8096
8097
8098 int
8099 dhd_preinit_ioctls(dhd_pub_t *dhd)
8100 {
8101         int ret = 0;
8102         char eventmask[WL_EVENTING_MASK_LEN];
8103         char iovbuf[WL_EVENTING_MASK_LEN + 12]; /*  Room for "event_msgs" + '\0' + bitvec  */
8104         uint32 buf_key_b4_m4 = 1;
8105 #ifndef WL_CFG80211
8106         u32 up = 0;
8107 #endif
8108         uint8 msglen;
8109         eventmsgs_ext_t *eventmask_msg = NULL;
8110         char* iov_buf = NULL;
8111         int ret2 = 0;
8112 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8113         uint32 ampdu_ba_wsize = 0;
8114 #endif 
8115 #if defined(CUSTOM_AMPDU_MPDU)
8116         int32 ampdu_mpdu = 0;
8117 #endif
8118 #if defined(CUSTOM_AMPDU_RELEASE)
8119         int32 ampdu_release = 0;
8120 #endif
8121 #if defined(CUSTOM_AMSDU_AGGSF)
8122         int32 amsdu_aggsf = 0;
8123 #endif
8124 #ifdef SUPPORT_SENSORHUB
8125         int32 shub_enable = 0;
8126 #endif /* SUPPORT_SENSORHUB */
8127 #if defined(BCMSDIO)
8128 #ifdef PROP_TXSTATUS
8129         int wlfc_enable = TRUE;
8130 #ifndef DISABLE_11N
8131         uint32 hostreorder = 1;
8132         uint wl_down = 1;
8133 #endif /* DISABLE_11N */
8134 #endif /* PROP_TXSTATUS */
8135 #endif 
8136 #ifdef PCIE_FULL_DONGLE
8137         uint32 wl_ap_isolate;
8138 #endif /* PCIE_FULL_DONGLE */
8139
8140 #if defined(BCMSDIO)
8141         /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8142         uint32 frameburst = 0;
8143 #else
8144         uint32 frameburst = 1;
8145 #endif /* BCMSDIO */
8146
8147 #ifdef DHD_ENABLE_LPC
8148         uint32 lpc = 1;
8149 #endif /* DHD_ENABLE_LPC */
8150         uint power_mode = PM_FAST;
8151 #if defined(BCMSDIO)
8152         uint32 dongle_align = DHD_SDALIGN;
8153         uint32 glom = CUSTOM_GLOM_SETTING;
8154 #endif /* defined(BCMSDIO) */
8155 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8156         uint32 credall = 1;
8157 #endif
8158         uint bcn_timeout = dhd->conf->bcn_timeout;
8159 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8160         uint32 bcn_li_bcn = 1;
8161 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8162         uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8163 #if defined(ARP_OFFLOAD_SUPPORT)
8164         int arpoe = 1;
8165 #endif
8166         int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8167         int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8168         int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8169         char buf[WLC_IOCTL_SMLEN];
8170         char *ptr;
8171         uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8172 #ifdef ROAM_ENABLE
8173         uint roamvar = 0;
8174         int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8175         int roam_scan_period[2] = {10, WLC_BAND_ALL};
8176         int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8177 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8178         int roam_fullscan_period = 60;
8179 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8180         int roam_fullscan_period = 120;
8181 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8182 #else
8183 #ifdef DISABLE_BUILTIN_ROAM
8184         uint roamvar = 1;
8185 #endif /* DISABLE_BUILTIN_ROAM */
8186 #endif /* ROAM_ENABLE */
8187
8188 #if defined(SOFTAP)
8189         uint dtim = 1;
8190 #endif
8191 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8192         uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8193         struct ether_addr p2p_ea;
8194 #endif
8195 #ifdef SOFTAP_UAPSD_OFF
8196         uint32 wme_apsd = 0;
8197 #endif /* SOFTAP_UAPSD_OFF */
8198 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8199         uint32 apsta = 1; /* Enable APSTA mode */
8200 #elif defined(SOFTAP_AND_GC)
8201         uint32 apsta = 0;
8202         int ap_mode = 1;
8203 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8204 #ifdef GET_CUSTOM_MAC_ENABLE
8205         struct ether_addr ea_addr;
8206 #endif /* GET_CUSTOM_MAC_ENABLE */
8207
8208 #ifdef DISABLE_11N
8209         uint32 nmode = 0;
8210 #endif /* DISABLE_11N */
8211
8212 #ifdef USE_WL_TXBF
8213         uint32 txbf = 1;
8214 #endif /* USE_WL_TXBF */
8215 #if defined(PROP_TXSTATUS)
8216 #ifdef USE_WFA_CERT_CONF
8217         uint32 proptx = 0;
8218 #endif /* USE_WFA_CERT_CONF */
8219 #endif /* PROP_TXSTATUS */
8220 #ifdef CUSTOM_PSPRETEND_THR
8221         uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8222 #endif
8223         uint32 rsdb_mode = 0;
8224 #ifdef ENABLE_TEMP_THROTTLING
8225         wl_temp_control_t temp_control;
8226 #endif /* ENABLE_TEMP_THROTTLING */
8227 #ifdef DISABLE_PRUNED_SCAN
8228         uint32 scan_features = 0;
8229 #endif /* DISABLE_PRUNED_SCAN */
8230 #ifdef CUSTOM_EVENT_PM_WAKE
8231         uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8232 #endif /* CUSTOM_EVENT_PM_WAKE */
8233 #ifdef PKT_FILTER_SUPPORT
8234         dhd_pkt_filter_enable = TRUE;
8235 #endif /* PKT_FILTER_SUPPORT */
8236 #ifdef WLTDLS
8237         dhd->tdls_enable = FALSE;
8238         dhd_tdls_set_mode(dhd, false);
8239 #endif /* WLTDLS */
8240         dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8241         DHD_TRACE(("Enter %s\n", __FUNCTION__));
8242
8243         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
8244 #ifdef DHDTCPACK_SUPPRESS
8245         printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8246         dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8247 #endif
8248
8249         dhd->op_mode = 0;
8250 #ifdef CUSTOMER_HW4_DEBUG
8251         if (!dhd_validate_chipid(dhd)) {
8252                 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
8253                         __FUNCTION__, dhd_bus_chip_id(dhd)));
8254 #ifndef SUPPORT_MULTIPLE_CHIPS
8255                 ret = BCME_BADARG;
8256                 goto done;
8257 #endif /* !SUPPORT_MULTIPLE_CHIPS */
8258         }
8259 #endif /* CUSTOMER_HW4_DEBUG */
8260         if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8261                 (op_mode == DHD_FLAG_MFG_MODE)) {
8262 #ifdef DHD_PCIE_RUNTIMEPM
8263                 /* Disable RuntimePM in mfg mode */
8264                 DHD_DISABLE_RUNTIME_PM(dhd);
8265                 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
8266 #endif /* DHD_PCIE_RUNTIME_PM */
8267                 /* Check and adjust IOCTL response timeout for Manufactring firmware */
8268                 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
8269                 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
8270                         __FUNCTION__));
8271         } else {
8272                 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8273                 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8274         }
8275 #ifdef GET_CUSTOM_MAC_ENABLE
8276         ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
8277         if (!ret) {
8278                 memset(buf, 0, sizeof(buf));
8279                 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8280                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8281                 if (ret < 0) {
8282                         DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8283                                 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
8284                         ret = BCME_NOTUP;
8285                         goto done;
8286                 }
8287                 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
8288         } else {
8289 #endif /* GET_CUSTOM_MAC_ENABLE */
8290                 /* Get the default device MAC address directly from firmware */
8291                 memset(buf, 0, sizeof(buf));
8292                 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8293                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8294                         FALSE, 0)) < 0) {
8295                         DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
8296                         ret = BCME_NOTUP;
8297                         goto done;
8298                 }
8299                 /* Update public MAC address after reading from Firmware */
8300                 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
8301
8302 #ifdef GET_CUSTOM_MAC_ENABLE
8303         }
8304 #endif /* GET_CUSTOM_MAC_ENABLE */
8305
8306         /* get a capabilities from firmware */
8307         {
8308                 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8309                 memset(dhd->fw_capabilities, 0, cap_buf_size);
8310                 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8311                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8312                         (cap_buf_size - 1), FALSE, 0)) < 0)
8313                 {
8314                         DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8315                                 __FUNCTION__, ret));
8316                         return 0;
8317                 }
8318
8319                 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8320                 dhd->fw_capabilities[0] = ' ';
8321                 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8322                 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8323         }
8324
8325         if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8326                 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8327 #ifdef SET_RANDOM_MAC_SOFTAP
8328                 uint rand_mac;
8329 #endif /* SET_RANDOM_MAC_SOFTAP */
8330                 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8331 #if defined(ARP_OFFLOAD_SUPPORT)
8332                         arpoe = 0;
8333 #endif
8334 #ifdef PKT_FILTER_SUPPORT
8335                         dhd_pkt_filter_enable = FALSE;
8336 #endif
8337 #ifdef SET_RANDOM_MAC_SOFTAP
8338                 SRANDOM32((uint)jiffies);
8339                 rand_mac = RANDOM32();
8340                 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;   /* local admin bit */
8341                 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8342                 iovbuf[2] = (unsigned char)vendor_oui;
8343                 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8344                 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8345                 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8346
8347                 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8348                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8349                 if (ret < 0) {
8350                         DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
8351                 } else
8352                         memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8353 #endif /* SET_RANDOM_MAC_SOFTAP */
8354 #if !defined(AP) && defined(WL_CFG80211)
8355                 /* Turn off MPC in AP mode */
8356                 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8357                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8358                         sizeof(iovbuf), TRUE, 0)) < 0) {
8359                         DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
8360                 }
8361 #endif
8362 #ifdef USE_DYNAMIC_F2_BLKSIZE
8363                 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8364 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8365 #ifdef SUPPORT_AP_POWERSAVE
8366                 dhd_set_ap_powersave(dhd, 0, TRUE);
8367 #endif /* SUPPORT_AP_POWERSAVE */
8368 #ifdef SOFTAP_UAPSD_OFF
8369                 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8370                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8371                         sizeof(iovbuf), TRUE, 0)) < 0) {
8372                         DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8373                                 __FUNCTION__, ret));
8374                 }
8375 #endif /* SOFTAP_UAPSD_OFF */
8376         } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8377                 (op_mode == DHD_FLAG_MFG_MODE)) {
8378 #if defined(ARP_OFFLOAD_SUPPORT)
8379                 arpoe = 0;
8380 #endif /* ARP_OFFLOAD_SUPPORT */
8381 #ifdef PKT_FILTER_SUPPORT
8382                 dhd_pkt_filter_enable = FALSE;
8383 #endif /* PKT_FILTER_SUPPORT */
8384                 dhd->op_mode = DHD_FLAG_MFG_MODE;
8385 #ifdef USE_DYNAMIC_F2_BLKSIZE
8386                 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8387 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8388                 if (FW_SUPPORTED(dhd, rsdb)) {
8389                         rsdb_mode = 0;
8390                         bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8391                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8392                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8393                                 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
8394                                         __FUNCTION__, ret));
8395                         }
8396                 }
8397         } else {
8398                 uint32 concurrent_mode = 0;
8399                 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8400                         (op_mode == DHD_FLAG_P2P_MODE)) {
8401 #if defined(ARP_OFFLOAD_SUPPORT)
8402                         arpoe = 0;
8403 #endif
8404 #ifdef PKT_FILTER_SUPPORT
8405                         dhd_pkt_filter_enable = FALSE;
8406 #endif
8407                         dhd->op_mode = DHD_FLAG_P2P_MODE;
8408                 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8409                         (op_mode == DHD_FLAG_IBSS_MODE)) {
8410                         dhd->op_mode = DHD_FLAG_IBSS_MODE;
8411                 } else
8412                         dhd->op_mode = DHD_FLAG_STA_MODE;
8413 #if !defined(AP) && defined(WLP2P)
8414                 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8415                         (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8416 #if defined(ARP_OFFLOAD_SUPPORT)
8417                         arpoe = 1;
8418 #endif
8419                         dhd->op_mode |= concurrent_mode;
8420                 }
8421
8422                 /* Check if we are enabling p2p */
8423                 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8424                         bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8425                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8426                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8427                                 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8428                         }
8429
8430 #if defined(SOFTAP_AND_GC)
8431                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8432                                 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8433                                         DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8434                         }
8435 #endif
8436                         memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8437                         ETHER_SET_LOCALADDR(&p2p_ea);
8438                         bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8439                                 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8440                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8441                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8442                                 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8443                         } else {
8444                                 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8445                         }
8446                 }
8447 #else
8448                 (void)concurrent_mode;
8449 #endif 
8450         }
8451
8452 #ifdef RSDB_MODE_FROM_FILE
8453         (void)dhd_rsdb_mode_from_file(dhd);
8454 #endif /* RSDB_MODE_FROM_FILE */
8455
8456 #ifdef DISABLE_PRUNED_SCAN
8457         if (FW_SUPPORTED(dhd, rsdb)) {
8458                 memset(iovbuf, 0, sizeof(iovbuf));
8459                 bcm_mkiovar("scan_features", (char *)&scan_features,
8460                         4, iovbuf, sizeof(iovbuf));
8461                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8462                         iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8463                         DHD_ERROR(("%s get scan_features is failed ret=%d\n",
8464                                 __FUNCTION__, ret));
8465                 } else {
8466                         memcpy(&scan_features, iovbuf, 4);
8467                         scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8468                         memset(iovbuf, 0, sizeof(iovbuf));
8469                         bcm_mkiovar("scan_features", (char *)&scan_features,
8470                                 4, iovbuf, sizeof(iovbuf));
8471                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8472                                 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8473                                 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
8474                                         __FUNCTION__, ret));
8475                         }
8476                 }
8477         }
8478 #endif /* DISABLE_PRUNED_SCAN */
8479
8480         DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8481                 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8482         #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8483         if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8484                 dhd->info->rxthread_enabled = FALSE;
8485         else
8486                 dhd->info->rxthread_enabled = TRUE;
8487         #endif
8488         /* Set Country code  */
8489         if (dhd->dhd_cspec.ccode[0] != 0) {
8490                 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8491                 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8492                         sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8493                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8494                         printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8495         } else {
8496                 dhd_conf_set_country(dhd);
8497                 dhd_conf_fix_country(dhd);
8498         }
8499         dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8500
8501
8502         /* Set Listen Interval */
8503         bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8504         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8505                 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8506
8507 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8508 #ifdef USE_WFA_CERT_CONF
8509         if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8510                 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8511         }
8512 #endif /* USE_WFA_CERT_CONF */
8513         /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
8514         bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8515         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8516 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
8517 #if defined(ROAM_ENABLE)
8518         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8519                 sizeof(roam_trigger), TRUE, 0)) < 0)
8520                 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8521         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8522                 sizeof(roam_scan_period), TRUE, 0)) < 0)
8523                 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
8524         if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
8525                 sizeof(roam_delta), TRUE, 0)) < 0)
8526                 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8527         bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8528         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8529                 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8530 #endif /* ROAM_ENABLE */
8531         dhd_conf_set_roam(dhd);
8532
8533 #ifdef CUSTOM_EVENT_PM_WAKE
8534         bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8535         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8536                 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8537         }
8538 #endif /* CUSTOM_EVENT_PM_WAKE */
8539 #ifdef WLTDLS
8540 #ifdef ENABLE_TDLS_AUTO_MODE
8541         /* by default TDLS on and auto mode on */
8542         _dhd_tdls_enable(dhd, true, true, NULL);
8543 #else
8544         /* by default TDLS on and auto mode off */
8545         _dhd_tdls_enable(dhd, true, false, NULL);
8546 #endif /* ENABLE_TDLS_AUTO_MODE */
8547 #endif /* WLTDLS */
8548
8549 #ifdef DHD_ENABLE_LPC
8550         /* Set lpc 1 */
8551         bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8552         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8553                 sizeof(iovbuf), TRUE, 0)) < 0) {
8554                 DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
8555
8556                 if (ret == BCME_NOTDOWN) {
8557                         uint wl_down = 1;
8558                         ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8559                                 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8560                         DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8561
8562                         bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8563                         ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8564                         DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8565                 }
8566         }
8567 #endif /* DHD_ENABLE_LPC */
8568         dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
8569
8570         /* Set PowerSave mode */
8571         if (dhd->conf->pm >= 0)
8572                 power_mode = dhd->conf->pm;
8573         dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
8574
8575 #if defined(BCMSDIO)
8576         /* Match Host and Dongle rx alignment */
8577         bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8578         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8579
8580 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8581         /* enable credall to reduce the chance of no bus credit happened. */
8582         bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8583         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8584 #endif
8585
8586 #ifdef USE_WFA_CERT_CONF
8587         if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8588                 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8589         }
8590 #endif /* USE_WFA_CERT_CONF */
8591         if (glom != DEFAULT_GLOM_VALUE) {
8592                 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8593                 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8594                 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8595         }
8596 #endif /* defined(BCMSDIO) */
8597
8598         /* Setup timeout if Beacons are lost and roam is off to report link down */
8599         bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8600         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8601         /* Setup assoc_retry_max count to reconnect target AP in dongle */
8602         bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8603         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8604 #if defined(AP) && !defined(WLP2P)
8605         /* Turn off MPC in AP mode */
8606         bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8607         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8608         bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8609         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8610 #endif /* defined(AP) && !defined(WLP2P) */
8611         /*  0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
8612         dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
8613         dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8614         dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8615         dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8616         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
8617         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
8618         dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
8619         dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8620
8621 #ifdef MIMO_ANT_SETTING
8622         dhd_sel_ant_from_file(dhd);
8623 #endif /* MIMO_ANT_SETTING */
8624
8625 #if defined(SOFTAP)
8626         if (ap_fw_loaded == TRUE) {
8627                 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8628         }
8629 #endif 
8630
8631 #if defined(KEEP_ALIVE)
8632         {
8633         /* Set Keep Alive : be sure to use FW with -keepalive */
8634         int res;
8635
8636 #if defined(SOFTAP)
8637         if (ap_fw_loaded == FALSE)
8638 #endif 
8639                 if (!(dhd->op_mode &
8640                         (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8641                         if ((res = dhd_keep_alive_onoff(dhd)) < 0)
8642                                 DHD_ERROR(("%s set keeplive failed %d\n",
8643                                 __FUNCTION__, res));
8644                 }
8645         }
8646 #endif /* defined(KEEP_ALIVE) */
8647
8648 #ifdef USE_WL_TXBF
8649         bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8650         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8651                 sizeof(iovbuf), TRUE, 0)) < 0) {
8652                 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8653         }
8654 #endif /* USE_WL_TXBF */
8655         dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
8656
8657 #ifdef USE_WFA_CERT_CONF
8658 #ifdef USE_WL_FRAMEBURST
8659          if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8660                 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8661          }
8662 #endif /* USE_WL_FRAMEBURST */
8663 #ifdef DISABLE_FRAMEBURST_VSDB
8664         g_frameburst = frameburst;
8665 #endif /* DISABLE_FRAMEBURST_VSDB */
8666 #endif /* USE_WFA_CERT_CONF */
8667 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
8668         /* Disable Framebursting for SofAP */
8669         if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8670                 frameburst = 0;
8671         }
8672 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8673         /* Set frameburst to value */
8674         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8675                 sizeof(frameburst), TRUE, 0)) < 0) {
8676                 DHD_INFO(("%s frameburst not supported  %d\n", __FUNCTION__, ret));
8677         }
8678         dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
8679 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8680         /* Set ampdu ba wsize to 64 or 16 */
8681 #ifdef CUSTOM_AMPDU_BA_WSIZE
8682         ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8683 #endif
8684         if (ampdu_ba_wsize != 0) {
8685                 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8686                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8687                         sizeof(iovbuf), TRUE, 0)) < 0) {
8688                         DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
8689                                 __FUNCTION__, ampdu_ba_wsize, ret));
8690                 }
8691         }
8692 #endif 
8693         dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
8694
8695         iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8696         if (iov_buf == NULL) {
8697                 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8698                 ret = BCME_NOMEM;
8699                 goto done;
8700         }
8701 #ifdef ENABLE_TEMP_THROTTLING
8702         if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8703                 memset(&temp_control, 0, sizeof(temp_control));
8704                 temp_control.enable = 1;
8705                 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8706                 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8707                                 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8708                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8709                 if (ret < 0) {
8710                         DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
8711                                 __FUNCTION__, ret));
8712                 }
8713         }
8714 #endif /* ENABLE_TEMP_THROTTLING */
8715 #if defined(CUSTOM_AMPDU_MPDU)
8716         ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8717         if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8718                 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
8719                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8720                         sizeof(iovbuf), TRUE, 0)) < 0) {
8721                         DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
8722                                 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8723                 }
8724         }
8725 #endif /* CUSTOM_AMPDU_MPDU */
8726
8727 #if defined(CUSTOM_AMPDU_RELEASE)
8728         ampdu_release = CUSTOM_AMPDU_RELEASE;
8729         if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8730                 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
8731                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8732                         sizeof(iovbuf), TRUE, 0)) < 0) {
8733                         DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
8734                                 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8735                 }
8736         }
8737 #endif /* CUSTOM_AMPDU_RELEASE */
8738
8739 #if defined(CUSTOM_AMSDU_AGGSF)
8740         amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8741         if (amsdu_aggsf != 0) {
8742                 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8743                 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8744                 if (ret < 0) {
8745                         DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8746                                 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8747                 }
8748         }
8749 #endif /* CUSTOM_AMSDU_AGGSF */
8750
8751 #ifdef CUSTOM_PSPRETEND_THR
8752         /* Turn off MPC in AP mode */
8753         bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8754                 iovbuf, sizeof(iovbuf));
8755         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8756                 sizeof(iovbuf), TRUE, 0)) < 0) {
8757                 DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
8758                         __FUNCTION__, ret));
8759         }
8760 #endif
8761
8762         bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8763         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8764                 sizeof(iovbuf), TRUE, 0)) < 0) {
8765                 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8766         }
8767
8768         /* Read event_msgs mask */
8769         bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8770         if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8771                 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8772                 goto done;
8773         }
8774         bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8775
8776         /* Setup event_msgs */
8777         setbit(eventmask, WLC_E_SET_SSID);
8778         setbit(eventmask, WLC_E_PRUNE);
8779         setbit(eventmask, WLC_E_AUTH);
8780         setbit(eventmask, WLC_E_AUTH_IND);
8781         setbit(eventmask, WLC_E_ASSOC);
8782         setbit(eventmask, WLC_E_REASSOC);
8783         setbit(eventmask, WLC_E_REASSOC_IND);
8784         if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8785                 setbit(eventmask, WLC_E_DEAUTH);
8786         setbit(eventmask, WLC_E_DEAUTH_IND);
8787         setbit(eventmask, WLC_E_DISASSOC_IND);
8788         setbit(eventmask, WLC_E_DISASSOC);
8789         setbit(eventmask, WLC_E_JOIN);
8790         setbit(eventmask, WLC_E_START);
8791         setbit(eventmask, WLC_E_ASSOC_IND);
8792         setbit(eventmask, WLC_E_PSK_SUP);
8793         setbit(eventmask, WLC_E_LINK);
8794         setbit(eventmask, WLC_E_MIC_ERROR);
8795         setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8796         setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8797 #ifndef WL_CFG80211
8798         setbit(eventmask, WLC_E_PMKID_CACHE);
8799         setbit(eventmask, WLC_E_TXFAIL);
8800 #endif
8801         setbit(eventmask, WLC_E_JOIN_START);
8802 //      setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8803 #ifdef DHD_DEBUG
8804         setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8805 #endif
8806 #ifdef WLMEDIA_HTSF
8807         setbit(eventmask, WLC_E_HTSFSYNC);
8808 #endif /* WLMEDIA_HTSF */
8809 #ifdef PNO_SUPPORT
8810         setbit(eventmask, WLC_E_PFN_NET_FOUND);
8811         setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8812         setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8813         setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8814 #endif /* PNO_SUPPORT */
8815         /* enable dongle roaming event */
8816         setbit(eventmask, WLC_E_ROAM);
8817         setbit(eventmask, WLC_E_BSSID);
8818 #ifdef WLTDLS
8819         setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8820 #endif /* WLTDLS */
8821 #ifdef WL_ESCAN
8822         setbit(eventmask, WLC_E_ESCAN_RESULT);
8823 #endif
8824 #ifdef WL_CFG80211
8825         setbit(eventmask, WLC_E_ESCAN_RESULT);
8826         setbit(eventmask, WLC_E_AP_STARTED);
8827         if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8828                 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8829                 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8830         }
8831 #endif /* WL_CFG80211 */
8832
8833 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8834         if (dhd_logtrace_from_file(dhd)) {
8835                 setbit(eventmask, WLC_E_TRACE);
8836         } else {
8837                 clrbit(eventmask, WLC_E_TRACE);
8838         }
8839 #elif defined(SHOW_LOGTRACE)
8840         setbit(eventmask, WLC_E_TRACE);
8841 #else
8842         clrbit(eventmask, WLC_E_TRACE);
8843 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8844
8845         setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8846 #ifdef DHD_LOSSLESS_ROAMING
8847         setbit(eventmask, WLC_E_ROAM_PREP);
8848 #endif
8849 #ifdef CUSTOM_EVENT_PM_WAKE
8850         setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8851 #endif /* CUSTOM_EVENT_PM_WAKE */
8852 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8853         dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8854 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8855
8856         /* Write updated Event mask */
8857         bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8858         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8859                 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8860                 goto done;
8861         }
8862
8863         /* make up event mask ext message iovar for event larger than 128 */
8864         msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8865         eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8866         if (eventmask_msg == NULL) {
8867                 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8868                 ret = BCME_NOMEM;
8869                 goto done;
8870         }
8871         bzero(eventmask_msg, msglen);
8872         eventmask_msg->ver = EVENTMSGS_VER;
8873         eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8874
8875         /* Read event_msgs_ext mask */
8876         bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8877         ret2  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8878         if (ret2 == 0) { /* event_msgs_ext must be supported */
8879                 bcopy(iov_buf, eventmask_msg, msglen);
8880 #ifdef GSCAN_SUPPORT
8881                 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8882                 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8883                 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8884 #endif /* GSCAN_SUPPORT */
8885 #ifdef BT_WIFI_HANDOVER
8886                 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8887 #endif /* BT_WIFI_HANDOVER */
8888
8889                 /* Write updated Event mask */
8890                 eventmask_msg->ver = EVENTMSGS_VER;
8891                 eventmask_msg->command = EVENTMSGS_SET_MASK;
8892                 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8893                 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8894                         msglen, iov_buf, WLC_IOCTL_SMLEN);
8895                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8896                         iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8897                         DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8898                         goto done;
8899                 }
8900         } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8901                 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
8902                 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
8903                         __FUNCTION__, ret2));
8904         } else {
8905                 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
8906                 ret = ret2;
8907                 goto done;
8908         }
8909
8910         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
8911                 sizeof(scan_assoc_time), TRUE, 0);
8912         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
8913                 sizeof(scan_unassoc_time), TRUE, 0);
8914         dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
8915                 sizeof(scan_passive_time), TRUE, 0);
8916
8917 #ifdef ARP_OFFLOAD_SUPPORT
8918         /* Set and enable ARP offload feature for STA only  */
8919 #if defined(SOFTAP)
8920         if (arpoe && !ap_fw_loaded)
8921 #else
8922         if (arpoe)
8923 #endif
8924         {
8925                 dhd_arp_offload_enable(dhd, TRUE);
8926                 dhd_arp_offload_set(dhd, dhd_arp_mode);
8927         } else {
8928                 dhd_arp_offload_enable(dhd, FALSE);
8929                 dhd_arp_offload_set(dhd, 0);
8930         }
8931         dhd_arp_enable = arpoe;
8932 #endif /* ARP_OFFLOAD_SUPPORT */
8933
8934 #ifdef PKT_FILTER_SUPPORT
8935         /* Setup default defintions for pktfilter , enable in suspend */
8936         if (dhd_master_mode) {
8937                 dhd->pktfilter_count = 6;
8938                 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
8939                 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
8940                 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8941                 /* apply APP pktfilter */
8942                 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
8943
8944                 /* Setup filter to allow only unicast */
8945                 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
8946
8947                 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
8948                 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
8949
8950 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
8951                 dhd->pktfilter_count = 4;
8952                 /* Setup filter to block broadcast and NAT Keepalive packets */
8953                 /* discard all broadcast packets */
8954                 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
8955                 /* discard NAT Keepalive packets */
8956                 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
8957                 /* discard NAT Keepalive packets */
8958                 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
8959                 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8960 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
8961         } else
8962                 dhd_conf_discard_pkt_filter(dhd);
8963         dhd_conf_add_pkt_filter(dhd);
8964
8965 #if defined(SOFTAP)
8966         if (ap_fw_loaded) {
8967                 dhd_enable_packet_filter(0, dhd);
8968         }
8969 #endif /* defined(SOFTAP) */
8970         dhd_set_packet_filter(dhd);
8971 #endif /* PKT_FILTER_SUPPORT */
8972 #ifdef DISABLE_11N
8973         bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
8974         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8975                 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
8976 #endif /* DISABLE_11N */
8977
8978 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8979         bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
8980         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8981 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8982         /* query for 'ver' to get version info from firmware */
8983         memset(buf, 0, sizeof(buf));
8984         ptr = buf;
8985         bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
8986         if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
8987                 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
8988         else {
8989                 bcmstrtok(&ptr, "\n", 0);
8990                 /* Print fw version info */
8991                 DHD_ERROR(("Firmware version = %s\n", buf));
8992                 strncpy(fw_version, buf, FW_VER_STR_LEN);
8993                 dhd_set_version_info(dhd, buf);
8994 #ifdef WRITE_WLANINFO
8995                 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
8996 #endif /* WRITE_WLANINFO */
8997         }
8998
8999 #if defined(BCMSDIO)
9000         dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
9001         // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
9002         dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE);
9003 #endif /* defined(BCMSDIO) */
9004
9005         dhd_conf_set_disable_proptx(dhd);
9006 #if defined(BCMSDIO)
9007 #ifdef PROP_TXSTATUS
9008         if (disable_proptx ||
9009 #ifdef PROP_TXSTATUS_VSDB
9010                 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
9011                 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
9012                  dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
9013 #endif /* PROP_TXSTATUS_VSDB */
9014                 FALSE) {
9015                 wlfc_enable = FALSE;
9016         }
9017
9018 #ifdef USE_WFA_CERT_CONF
9019         if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
9020                 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
9021                 wlfc_enable = proptx;
9022         }
9023 #endif /* USE_WFA_CERT_CONF */
9024
9025 #ifndef DISABLE_11N
9026         ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
9027         bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
9028         if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9029                 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9030                 if (ret2 != BCME_UNSUPPORTED)
9031                         ret = ret2;
9032
9033                 if (ret == BCME_NOTDOWN) {
9034                         uint wl_down = 1;
9035                         ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9036                                 sizeof(wl_down), TRUE, 0);
9037                         DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9038                                 __FUNCTION__, ret2, hostreorder));
9039
9040                         bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9041                                 iovbuf, sizeof(iovbuf));
9042                         ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9043                         DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9044                         if (ret2 != BCME_UNSUPPORTED)
9045                                         ret = ret2;
9046                 }
9047                 if (ret2 != BCME_OK)
9048                         hostreorder = 0;
9049         }
9050 #endif /* DISABLE_11N */
9051
9052
9053         if (wlfc_enable)
9054                 dhd_wlfc_init(dhd);
9055 #ifndef DISABLE_11N
9056         else if (hostreorder)
9057                 dhd_wlfc_hostreorder_init(dhd);
9058 #endif /* DISABLE_11N */
9059
9060 #endif /* PROP_TXSTATUS */
9061 #endif /* BCMSDIO || BCMBUS */
9062 #ifdef PCIE_FULL_DONGLE
9063         /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9064         if (FW_SUPPORTED(dhd, ap)) {
9065                 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9066                 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9067                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9068                         DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9069         }
9070 #endif /* PCIE_FULL_DONGLE */
9071 #ifdef PNO_SUPPORT
9072         if (!dhd->pno_state) {
9073                 dhd_pno_init(dhd);
9074         }
9075 #endif
9076 #ifdef WL11U
9077         dhd_interworking_enable(dhd);
9078 #endif /* WL11U */
9079 #ifndef WL_CFG80211
9080         dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9081 #endif
9082
9083 #ifdef SUPPORT_SENSORHUB
9084         bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9085         if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9086                 FALSE, 0)) < 0) {
9087                 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
9088                         __FUNCTION__, ret));
9089                 dhd->info->shub_enable = 0;
9090         } else {
9091                 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9092                 dhd->info->shub_enable = shub_enable;
9093                 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9094                         __FUNCTION__, dhd->info->shub_enable));
9095         }
9096 #endif /* SUPPORT_SENSORHUB */
9097 done:
9098
9099         if (eventmask_msg)
9100                 kfree(eventmask_msg);
9101         if (iov_buf)
9102                 kfree(iov_buf);
9103
9104         return ret;
9105 }
9106
9107
9108 int
9109 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9110 {
9111         char buf[strlen(name) + 1 + cmd_len];
9112         int len = sizeof(buf);
9113         wl_ioctl_t ioc;
9114         int ret;
9115
9116         len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9117
9118         memset(&ioc, 0, sizeof(ioc));
9119
9120         ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9121         ioc.buf = buf;
9122         ioc.len = len;
9123         ioc.set = set;
9124
9125         ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9126         if (!set && ret >= 0)
9127                 memcpy(cmd_buf, buf, cmd_len);
9128
9129         return ret;
9130 }
9131
9132 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9133 {
9134         struct dhd_info *dhd = dhdp->info;
9135         struct net_device *dev = NULL;
9136
9137         ASSERT(dhd && dhd->iflist[ifidx]);
9138         dev = dhd->iflist[ifidx]->net;
9139         ASSERT(dev);
9140
9141         if (netif_running(dev)) {
9142                 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9143                 return BCME_NOTDOWN;
9144         }
9145
9146 #define DHD_MIN_MTU 1500
9147 #define DHD_MAX_MTU 1752
9148
9149         if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9150                 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9151                 return BCME_BADARG;
9152         }
9153
9154         dev->mtu = new_mtu;
9155         return 0;
9156 }
9157
9158 #ifdef ARP_OFFLOAD_SUPPORT
9159 /* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
9160 void
9161 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9162 {
9163         u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9164         int i;
9165         int ret;
9166
9167         bzero(ipv4_buf, sizeof(ipv4_buf));
9168
9169         /* display what we've got */
9170         ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9171         DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9172 #ifdef AOE_DBG
9173         dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9174 #endif
9175         /* now we saved hoste_ip table, clr it in the dongle AOE */
9176         dhd_aoe_hostip_clr(dhd_pub, idx);
9177
9178         if (ret) {
9179                 DHD_ERROR(("%s failed\n", __FUNCTION__));
9180                 return;
9181         }
9182
9183         for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9184                 if (add && (ipv4_buf[i] == 0)) {
9185                                 ipv4_buf[i] = ipa;
9186                                 add = FALSE; /* added ipa to local table  */
9187                                 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9188                                 __FUNCTION__, i));
9189                 } else if (ipv4_buf[i] == ipa) {
9190                         ipv4_buf[i]     = 0;
9191                         DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9192                                 __FUNCTION__, ipa, i));
9193                 }
9194
9195                 if (ipv4_buf[i] != 0) {
9196                         /* add back host_ip entries from our local cache */
9197                         dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9198                         DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9199                                 __FUNCTION__, ipv4_buf[i], i));
9200                 }
9201         }
9202 #ifdef AOE_DBG
9203         /* see the resulting hostip table */
9204         dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9205         DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9206         dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9207 #endif
9208 }
9209
9210 /*
9211  * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
9212  * whenever there is an event related to an IP address.
9213  * ptr : kernel provided pointer to IP address that has changed
9214  */
9215 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
9216         unsigned long event,
9217         void *ptr)
9218 {
9219         struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
9220
9221         dhd_info_t *dhd;
9222         dhd_pub_t *dhd_pub;
9223         int idx;
9224
9225         if (!dhd_arp_enable)
9226                 return NOTIFY_DONE;
9227         if (!ifa || !(ifa->ifa_dev->dev))
9228                 return NOTIFY_DONE;
9229
9230 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9231         /* Filter notifications meant for non Broadcom devices */
9232         if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
9233             (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
9234 #if defined(WL_ENABLE_P2P_IF)
9235                 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
9236 #endif /* WL_ENABLE_P2P_IF */
9237                         return NOTIFY_DONE;
9238         }
9239 #endif /* LINUX_VERSION_CODE */
9240
9241         dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
9242         if (!dhd)
9243                 return NOTIFY_DONE;
9244
9245         dhd_pub = &dhd->pub;
9246
9247         if (dhd_pub->arp_version == 1) {
9248                 idx = 0;
9249         } else {
9250                 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
9251                         if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
9252                         break;
9253                 }
9254                 if (idx < DHD_MAX_IFS) {
9255                         DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
9256                                 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
9257                 } else {
9258                         DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
9259                         idx = 0;
9260                 }
9261         }
9262
9263         switch (event) {
9264                 case NETDEV_UP:
9265                         DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
9266                                 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9267
9268                         if (dhd->pub.busstate != DHD_BUS_DATA) {
9269                                 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
9270                                 if (dhd->pend_ipaddr) {
9271                                         DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
9272                                                 __FUNCTION__, dhd->pend_ipaddr));
9273                                 }
9274                                 dhd->pend_ipaddr = ifa->ifa_address;
9275                                 break;
9276                         }
9277
9278 #ifdef AOE_IP_ALIAS_SUPPORT
9279                         DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
9280                                 __FUNCTION__));
9281                         aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
9282 #endif /* AOE_IP_ALIAS_SUPPORT */
9283                         break;
9284
9285                 case NETDEV_DOWN:
9286                         DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
9287                                 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9288                         dhd->pend_ipaddr = 0;
9289 #ifdef AOE_IP_ALIAS_SUPPORT
9290                         DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
9291                                 __FUNCTION__));
9292                         aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
9293 #else
9294                         dhd_aoe_hostip_clr(&dhd->pub, idx);
9295                         dhd_aoe_arp_clr(&dhd->pub, idx);
9296 #endif /* AOE_IP_ALIAS_SUPPORT */
9297                         break;
9298
9299                 default:
9300                         DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
9301                                 __func__, ifa->ifa_label, event));
9302                         break;
9303         }
9304         return NOTIFY_DONE;
9305 }
9306 #endif /* ARP_OFFLOAD_SUPPORT */
9307
9308 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9309 /* Neighbor Discovery Offload: defered handler */
9310 static void
9311 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9312 {
9313         struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9314         dhd_pub_t       *pub = &((dhd_info_t *)dhd_info)->pub;
9315         int             ret;
9316
9317         if (event != DHD_WQ_WORK_IPV6_NDO) {
9318                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9319                 return;
9320         }
9321
9322         if (!ndo_work) {
9323                 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9324                 return;
9325         }
9326
9327         if (!pub) {
9328                 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9329                 return;
9330         }
9331
9332         if (ndo_work->if_idx) {
9333                 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
9334                 return;
9335         }
9336
9337         switch (ndo_work->event) {
9338                 case NETDEV_UP:
9339                         DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9340                         ret = dhd_ndo_enable(pub, TRUE);
9341                         if (ret < 0) {
9342                                 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9343                         }
9344
9345                         ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9346                         if (ret < 0) {
9347                                 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9348                                         __FUNCTION__, ret));
9349                         }
9350                         break;
9351                 case NETDEV_DOWN:
9352                         DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9353                         ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9354                         if (ret < 0) {
9355                                 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9356                                         __FUNCTION__, ret));
9357                                 goto done;
9358                         }
9359
9360                         ret = dhd_ndo_enable(pub, FALSE);
9361                         if (ret < 0) {
9362                                 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9363                                 goto done;
9364                         }
9365                         break;
9366                 default:
9367                         DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9368                         break;
9369         }
9370 done:
9371         /* free ndo_work. alloced while scheduling the work */
9372         kfree(ndo_work);
9373
9374         return;
9375 }
9376
9377 /*
9378  * Neighbor Discovery Offload: Called when an interface
9379  * is assigned with ipv6 address.
9380  * Handles only primary interface
9381  */
9382 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9383         unsigned long event,
9384         void *ptr)
9385 {
9386         dhd_info_t *dhd;
9387         dhd_pub_t *dhd_pub;
9388         struct inet6_ifaddr *inet6_ifa = ptr;
9389         struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9390         struct ipv6_work_info_t *ndo_info;
9391         int idx = 0; /* REVISIT */
9392
9393 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9394         /* Filter notifications meant for non Broadcom devices */
9395         if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9396                         return NOTIFY_DONE;
9397         }
9398 #endif /* LINUX_VERSION_CODE */
9399
9400         dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9401         if (!dhd)
9402                 return NOTIFY_DONE;
9403
9404         if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9405                 return NOTIFY_DONE;
9406         dhd_pub = &dhd->pub;
9407
9408         if (!FW_SUPPORTED(dhd_pub, ndoe))
9409                 return NOTIFY_DONE;
9410
9411         ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9412         if (!ndo_info) {
9413                 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9414                 return NOTIFY_DONE;
9415         }
9416
9417         ndo_info->event = event;
9418         ndo_info->if_idx = idx;
9419         memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9420
9421         /* defer the work to thread as it may block kernel */
9422         dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9423                 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9424         return NOTIFY_DONE;
9425 }
9426 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9427
/*
 * Register the net_device for interface 'ifidx' with the kernel: install the
 * netdev/ethtool/wireless-ext operations, pick a MAC address (primary MAC for
 * ifidx 0, primary MAC with the locally-administered bit for virtual
 * interfaces that would otherwise collide) and call register_netdev(ice).
 * Returns 0 on success or the negative register error; on failure the netdev
 * ops installed above are rolled back.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
        dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
        dhd_if_t *ifp;
        struct net_device *net = NULL;
        int err = 0;
        /* Placeholder Broadcom-OUI address; overwritten below once a real
         * MAC is available.
         */
        uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

        DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

        ASSERT(dhd && dhd->iflist[ifidx]);
        ifp = dhd->iflist[ifidx];
        net = ifp->net;
        ASSERT(net && (ifp->idx == ifidx));

        /* Default to the virtual-interface ops; replaced for ifidx 0 below */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
        ASSERT(!net->open);
        net->get_stats = dhd_get_stats;
        net->do_ioctl = dhd_ioctl_entry;
        net->hard_start_xmit = dhd_start_xmit;
        net->set_mac_address = dhd_set_mac_address;
        net->set_multicast_list = dhd_set_multicast_list;
        net->open = net->stop = NULL;
#else
        ASSERT(!net->netdev_ops);
        net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

        /* Ok, link into the network layer... */
        if (ifidx == 0) {
                /*
                 * device functions for the primary interface only
                 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
                net->open = dhd_open;
                net->stop = dhd_stop;
#else
                net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
                if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
                        memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
        } else {
                /*
                 * We have to use the primary MAC for virtual interfaces
                 */
                memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
                /*
                 * Android sets the locally administered bit to indicate that this is a
                 * portable hotspot.  This will not work in simultaneous AP/STA mode,
                 * nor with P2P.  Need to set the Donlge's MAC address, and then use that.
                 */
                if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
                        ETHER_ADDR_LEN)) {
                        DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
                        __func__, net->name));
                        temp_addr[0] |= 0x02;
                }
        }

        /* Reserve room for the dongle's protocol header in front of ETH_HLEN */
        net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
        net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
        net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
        net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

        dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

        memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

        if (ifidx == 0)
                printf("%s\n", dhd_version);

        if (need_rtnl_lock)
                err = register_netdev(net);
        else
                err = register_netdevice(net);

        if (err != 0) {
                DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
                goto fail;
        }



        printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
                MAC2STRDBG(dhd->pub.mac.octet));
#else
                MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//              wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
        KERNEL_VERSION(2, 6, 27))))
        if (ifidx == 0) {
#ifdef BCMLXSDMMC
                up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
                if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
                        wl_terminate_event_handler();
#endif /* WL_CFG80211 */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
                        __skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
                        dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
                        /* Firmware is loaded on first 'ifconfig up' in this
                         * mode, so power the dongle back down until then.
                         */
                        dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
                        dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
                        wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
                }
        }
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
        return 0;

fail:
        /* Roll back the ops pointers so a retry starts from a clean state */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
        net->open = NULL;
#else
        net->netdev_ops = NULL;
#endif
        return err;
}
9566
9567 void
9568 dhd_bus_detach(dhd_pub_t *dhdp)
9569 {
9570         dhd_info_t *dhd;
9571
9572         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9573
9574         if (dhdp) {
9575                 dhd = (dhd_info_t *)dhdp->info;
9576                 if (dhd) {
9577
9578                         /*
9579                          * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9580                          *  calling stop again will cuase SD read/write errors.
9581                          */
9582                         if (dhd->pub.busstate != DHD_BUS_DOWN) {
9583                                 /* Stop the protocol module */
9584                                 dhd_prot_stop(&dhd->pub);
9585
9586                                 /* Stop the bus module */
9587                                 dhd_bus_stop(dhd->pub.bus, TRUE);
9588                         }
9589
9590 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9591                         dhd_bus_oob_intr_unregister(dhdp);
9592 #endif 
9593                 }
9594         }
9595 }
9596
9597
/*
 * Tear down everything dhd_attach()/dhd_register_if() set up: close and
 * unregister all net_devices, stop the protocol/bus, unhook the inet/inet6/
 * pm notifiers, stop helper threads and tasklets, and release timers,
 * wakelocks and the deferred work queue.  The dhd_info allocation itself is
 * released later by dhd_free().
 */
void dhd_detach(dhd_pub_t *dhdp)
{
        dhd_info_t *dhd;
        unsigned long flags;
        int timer_valid = FALSE;
        struct net_device *dev;

        if (!dhdp)
                return;

        dhd = (dhd_info_t *)dhdp->info;
        if (!dhd)
                return;

        /* NOTE(review): iflist[0] is dereferenced without a NULL check here;
         * presumably detach is only reached after the primary interface was
         * allocated — confirm against the dhd_attach() failure paths.
         */
        dev = dhd->iflist[0]->net;

        if (dev) {
                rtnl_lock();
                if (dev->flags & IFF_UP) {
                        /* If IFF_UP is still up, it indicates that
                         * "ifconfig wlan0 down" hasn't been called.
                         * So invoke dev_close explicitly here to
                         * bring down the interface.
                         */
                        DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
                        dev_close(dev);
                }
                rtnl_unlock();
        }

        DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

        dhd->pub.up = 0;
        if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
                /* Give sufficient time for threads to start running in case
                 * dhd_attach() has failed
                 */
                OSL_SLEEP(100);
        }

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

#ifdef PROP_TXSTATUS
#ifdef DHD_WLFC_THREAD
        /* Stop the wlfc (flow-control) thread and wake any waiter on it */
        if (dhd->pub.wlfc_thread) {
                kthread_stop(dhd->pub.wlfc_thread);
                dhdp->wlfc_thread_go = TRUE;
                wake_up_interruptible(&dhdp->wlfc_wqhead);
        }
        dhd->pub.wlfc_thread = NULL;
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

        if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {

                dhd_bus_detach(dhdp);
#ifdef BCMPCIE
                /* On a system restart, power the dongle down explicitly */
                if (is_reboot == SYS_RESTART) {
                        extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
                        if (dhd_wifi_platdata && !dhdp->dongle_reset) {
                                dhdpcie_bus_clock_stop(dhdp->bus);
                                wifi_platform_set_power(dhd_wifi_platdata->adapters,
                                        FALSE, WIFI_TURNOFF_DELAY);
                        }
                }
#endif /* BCMPCIE */
#ifndef PCIE_FULL_DONGLE
                if (dhdp->prot)
                        dhd_prot_detach(dhdp);
#endif
        }

#ifdef ARP_OFFLOAD_SUPPORT
        if (dhd_inetaddr_notifier_registered) {
                dhd_inetaddr_notifier_registered = FALSE;
                unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
        }
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
        if (dhd_inet6addr_notifier_registered) {
                dhd_inet6addr_notifier_registered = FALSE;
                unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
        }
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
        if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
                if (dhd->early_suspend.suspend)
                        unregister_early_suspend(&dhd->early_suspend);
        }
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
        if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
                /* Detatch and unlink in the iw */
                wl_iw_detach();
        }
#ifdef WL_ESCAN
        wl_escan_detach();
#endif
#endif /* defined(WL_WIRELESS_EXT) */

        /* delete all interfaces, start with virtual  */
        if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
                int i = 1;
                dhd_if_t *ifp;

                /* Cleanup virtual interfaces */
                dhd_net_if_lock_local(dhd);
                for (i = 1; i < DHD_MAX_IFS; i++) {
                        if (dhd->iflist[i])
                                dhd_remove_if(&dhd->pub, i, TRUE);
                }
                dhd_net_if_unlock_local(dhd);

                /*  delete primary interface 0 */
                ifp = dhd->iflist[0];
                ASSERT(ifp);
                ASSERT(ifp->net);
                if (ifp && ifp->net) {



                        /* in unregister_netdev case, the interface gets freed by net->destructor
                         * (which is set to free_netdev)
                         */
                        if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
                                free_netdev(ifp->net);
                        } else {
#ifdef SET_RPS_CPUS
                                custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
                                netif_tx_disable(ifp->net);
                                unregister_netdev(ifp->net);
                        }
                        ifp->net = NULL;
#ifdef DHD_WMF
                        dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
                        bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
                                NULL, FALSE, dhdp->tickcnt);
                        deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
                        ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */

                        dhd_if_del_sta_list(ifp);

                        MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
                        dhd->iflist[0] = NULL;
                }
        }

        /* Clear the watchdog timer */
        DHD_GENERAL_LOCK(&dhd->pub, flags);
        timer_valid = dhd->wd_timer_valid;
        dhd->wd_timer_valid = FALSE;
        DHD_GENERAL_UNLOCK(&dhd->pub, flags);
        if (timer_valid)
                del_timer_sync(&dhd->timer);
        DHD_DISABLE_RUNTIME_PM(&dhd->pub);

        /* Stop the helper threads spawned during attach; if no DPC thread
         * exists the DPC tasklet is killed instead.
         */
        if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
#ifdef DHD_PCIE_RUNTIMEPM
                if (dhd->thr_rpm_ctl.thr_pid >= 0) {
                        PROC_STOP(&dhd->thr_rpm_ctl);
                }
#endif /* DHD_PCIE_RUNTIMEPM */
                if (dhd->thr_wdt_ctl.thr_pid >= 0) {
                        PROC_STOP(&dhd->thr_wdt_ctl);
                }

                if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
                        PROC_STOP(&dhd->thr_rxf_ctl);
                }

                if (dhd->thr_dpc_ctl.thr_pid >= 0) {
                        PROC_STOP(&dhd->thr_dpc_ctl);
                } else {
                        tasklet_kill(&dhd->tasklet);
#ifdef DHD_LB_RXP
                        __skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
                }
        }

#if defined(DHD_LB)
        /* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
        tasklet_disable(&dhd->tx_compl_tasklet);
        tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
        tasklet_disable(&dhd->rx_compl_tasklet);
        tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
        if (dhd->cpu_notifier.notifier_call != NULL)
                unregister_cpu_notifier(&dhd->cpu_notifier);
        dhd_cpumasks_deinit(dhd);
#endif /* DHD_LB */

#ifdef DHD_LOG_DUMP
        dhd_log_dump_deinit(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#ifdef WL_CFG80211
        if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
                wl_cfg80211_detach(NULL);
                dhd_monitor_uninit();
        }
#endif
        /* free deferred work queue */
        dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
        dhd->dhd_deferred_wq = NULL;

#ifdef SHOW_LOGTRACE
        /* Release event-log format string buffers */
        if (dhd->event_data.fmts)
                kfree(dhd->event_data.fmts);
        if (dhd->event_data.raw_fmts)
                kfree(dhd->event_data.raw_fmts);
        if (dhd->event_data.raw_sstr)
                kfree(dhd->event_data.raw_sstr);
#endif /* SHOW_LOGTRACE */

#ifdef PNO_SUPPORT
        if (dhdp->pno_state)
                dhd_pno_deinit(dhdp);
#endif
#if defined(CONFIG_PM_SLEEP)
        if (dhd_pm_notifier_registered) {
                unregister_pm_notifier(&dhd->pm_notifier);
                dhd_pm_notifier_registered = FALSE;
        }
#endif /* CONFIG_PM_SLEEP */

#ifdef DEBUG_CPU_FREQ
                if (dhd->new_freq)
                        free_percpu(dhd->new_freq);
                dhd->new_freq = NULL;
                cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
        if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
                DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
                dhd->wakelock_wd_counter = 0;
                wake_lock_destroy(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
                DHD_OS_WAKE_LOCK_DESTROY(dhd);
        }



#ifdef DHDTCPACK_SUPPRESS
        /* This will free all MEM allocated for TCPACK SUPPRESS */
        dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */

#ifdef PCIE_FULL_DONGLE
                dhd_flow_rings_deinit(dhdp);
                if (dhdp->prot)
                        dhd_prot_detach(dhdp);
#endif


        dhd_sysfs_exit(dhd);
        dhd->pub.is_fw_download_done = FALSE;
        dhd_conf_detach(dhdp);
}
9865
9866
9867 void
9868 dhd_free(dhd_pub_t *dhdp)
9869 {
9870         dhd_info_t *dhd;
9871         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9872
9873         if (dhdp) {
9874                 int i;
9875                 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9876                         if (dhdp->reorder_bufs[i]) {
9877                                 reorder_info_t *ptr;
9878                                 uint32 buf_size = sizeof(struct reorder_info);
9879
9880                                 ptr = dhdp->reorder_bufs[i];
9881
9882                                 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9883                                 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9884                                         i, ptr->max_idx, buf_size));
9885
9886                                 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9887                                 dhdp->reorder_bufs[i] = NULL;
9888                         }
9889                 }
9890
9891                 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
9892
9893                 dhd = (dhd_info_t *)dhdp->info;
9894                 if (dhdp->soc_ram) {
9895 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9896                         DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9897 #else
9898                         MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9899 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9900                         dhdp->soc_ram = NULL;
9901                 }
9902 #ifdef CACHE_FW_IMAGES
9903                 if (dhdp->cached_fw) {
9904                         MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
9905                         dhdp->cached_fw = NULL;
9906                 }
9907
9908                 if (dhdp->cached_nvram) {
9909                         MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
9910                         dhdp->cached_nvram = NULL;
9911                 }
9912 #endif
9913                 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
9914                 if (dhd &&
9915                         dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
9916                         MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
9917                 dhd = NULL;
9918         }
9919 }
9920
9921 void
9922 dhd_clear(dhd_pub_t *dhdp)
9923 {
9924         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9925
9926         if (dhdp) {
9927                 int i;
9928 #ifdef DHDTCPACK_SUPPRESS
9929                 /* Clean up timer/data structure for any remaining/pending packet or timer. */
9930                 dhd_tcpack_info_tbl_clean(dhdp);
9931 #endif /* DHDTCPACK_SUPPRESS */
9932                 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9933                         if (dhdp->reorder_bufs[i]) {
9934                                 reorder_info_t *ptr;
9935                                 uint32 buf_size = sizeof(struct reorder_info);
9936
9937                                 ptr = dhdp->reorder_bufs[i];
9938
9939                                 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9940                                 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9941                                         i, ptr->max_idx, buf_size));
9942
9943                                 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9944                                 dhdp->reorder_bufs[i] = NULL;
9945                         }
9946                 }
9947
9948                 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
9949
9950                 if (dhdp->soc_ram) {
9951 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9952                         DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9953 #else
9954                         MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9955 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9956                         dhdp->soc_ram = NULL;
9957                 }
9958         }
9959 }
9960
/*
 * Unwind dhd_module_init(): unregister the bus driver, shut down the
 * Android wl control layer, then unregister the wifi platform driver.
 */
static void
dhd_module_cleanup(void)
{
        printf("%s: Enter\n", __FUNCTION__);

        dhd_bus_unregister();

        wl_android_exit();

        dhd_wifi_platform_unregister_drv();
        printf("%s: Exit\n", __FUNCTION__);
}
9973
/*
 * Module unload path: detach the buzzz tracer, clean the driver up and
 * drop the reboot notifier registered during dhd_module_init().
 */
static void
dhd_module_exit(void)
{
        dhd_buzzz_detach();
        dhd_module_cleanup();
        unregister_reboot_notifier(&dhd_reboot_notifier);
}
9981
/*
 * Driver load entry: back up the firmware/nvram module-parameter paths
 * (a failed load attempt may clobber them), register the wifi platform
 * driver and, on success, install the reboot notifier.
 * NOTE(review): 'retry' starts at 0, so the do/while body executes exactly
 * once before 'retry--' terminates the loop — confirm whether a positive
 * retry budget was intended here.
 */
static int
dhd_module_init(void)
{
        int err;
        int retry = 0;

        printf("%s: in\n", __FUNCTION__);

        dhd_buzzz_attach();

        DHD_PERIM_RADIO_INIT();


        /* Snapshot the user-supplied paths so they can be restored on retry */
        if (firmware_path[0] != '\0') {
                strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
                fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
        }

        if (nvram_path[0] != '\0') {
                strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
                nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
        }

        do {
                err = dhd_wifi_platform_register_drv();
                if (!err) {
                        register_reboot_notifier(&dhd_reboot_notifier);
                        break;
                }
                else {
                        DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
                                __FUNCTION__, retry));
                        /* Restore the saved paths before the next attempt */
                        strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
                        firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
                        strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
                        nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
                }
        } while (retry--);

        if (err) {
                DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
        } else {
                if (!dhd_download_fw_on_driverload) {
                        dhd_driver_init_done = TRUE;
                }
        }

        printf("%s: Exit err=%d\n", __FUNCTION__, err);
        return err;
}
10032
/*
 * Reboot notifier: record that a restart (as opposed to a poweroff) is in
 * progress so the PCIe detach path can power the dongle down cleanly.
 */
static int
dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
{
        DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
        if (code == SYS_RESTART) {
#ifdef BCMPCIE
                is_reboot = code;
#endif /* BCMPCIE */
        }
        return NOTIFY_DONE;
}
10044
/* Worker thread body: runs the (potentially slow) driver bring-up off the
 * initcall path.  NOTE(review): the dhd_module_init() result is discarded
 * here; a failure is only visible in the kernel log.
 */
static int wifi_init_thread(void *data)
{
        dhd_module_init();

        return 0;
}
10051
10052 int __init rockchip_wifi_init_module_rkwifi(void)
10053 {
10054         struct task_struct *kthread = NULL;
10055
10056         kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
10057         if (IS_ERR(kthread))
10058                 pr_err("create wifi_init_thread failed.\n");
10059
10060         return 0;
10061 }
10062
/* Module exit entry point: tear the driver down synchronously. */
void __exit rockchip_wifi_exit_module_rkwifi(void)
{
        dhd_module_exit();
}
10067
/* Bring the driver up late in boot via the worker thread spawned above. */
late_initcall(rockchip_wifi_init_module_rkwifi);
module_exit(rockchip_wifi_exit_module_rkwifi);

/* Disabled upstream bcmdhd initcall selection, superseded by the
 * Rockchip-specific entry points above.
 */
#if 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
        defined(CONFIG_ARCH_MSM8996)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
        * CONFIG_ARCH_MSM8996
        */
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);

#endif
10094 /*
10095  * OS specific functions required to implement DHD driver in OS independent way
10096  */
10097 int
10098 dhd_os_proto_block(dhd_pub_t *pub)
10099 {
10100         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10101
10102         if (dhd) {
10103                 DHD_PERIM_UNLOCK(pub);
10104
10105                 down(&dhd->proto_sem);
10106
10107                 DHD_PERIM_LOCK(pub);
10108                 return 1;
10109         }
10110
10111         return 0;
10112 }
10113
10114 int
10115 dhd_os_proto_unblock(dhd_pub_t *pub)
10116 {
10117         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10118
10119         if (dhd) {
10120                 up(&dhd->proto_sem);
10121                 return 1;
10122         }
10123
10124         return 0;
10125 }
10126
10127 void
10128 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10129 {
10130         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10131
10132         if (dhd) {
10133                 mutex_lock(&dhd->dhd_iovar_mutex);
10134         }
10135 }
10136
10137 void
10138 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10139 {
10140         dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10141
10142         if (dhd) {
10143                 mutex_unlock(&dhd->dhd_iovar_mutex);
10144         }
10145 }
10146
/* Return the current IOCTL response timeout in milliseconds. */
unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
        return ((unsigned int)dhd_ioctl_timeout_msec);
}
10152
10153 void
10154 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10155 {
10156         dhd_ioctl_timeout_msec = (int)timeout_msec;
10157 }
10158
/*
 * Block until *condition becomes non-zero or the IOCTL response timeout
 * (dhd_ioctl_timeout_msec) expires.  The perimeter lock is released for
 * the duration of the wait so the response path can make progress.
 * Returns non-zero (remaining jiffies) if the condition was met, 0 on
 * timeout (wait_event_timeout() semantics).
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10180
10181 int
10182 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10183 {
10184         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10185
10186         wake_up(&dhd->ioctl_resp_wait);
10187         return 0;
10188 }
10189
/*
 * Block until *condition becomes non-zero (D3 ACK received) or the IOCTL
 * timeout expires.  Mirrors dhd_os_ioctl_resp_wait() but on the d3ack
 * wait queue.  Returns non-zero if the condition was met, 0 on timeout.
 */
int
dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10211
10212 int
10213 dhd_os_d3ack_wake(dhd_pub_t *pub)
10214 {
10215         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10216
10217         wake_up(&dhd->d3ack_wait);
10218         return 0;
10219 }
10220
/*
 * Wait for *condition to become zero (bus-busy contexts drained) or for
 * DHD_BUS_BUSY_TIMEOUT to expire.  Returns non-zero if the condition
 * cleared in time, 0 on timeout.
 */
int
dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Wait for bus usage contexts to gracefully exit within some timeout value
	 * Set time out to little higher than dhd_ioctl_timeout_msec,
	 * so that IOCTL timeout should not get affected.
	 */
	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
#else
	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);

	return timeout;
}
10242
10243 int INLINE
10244 dhd_os_busbusy_wake(dhd_pub_t *pub)
10245 {
10246         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10247         /* Call wmb() to make sure before waking up the other event value gets updated */
10248         OSL_SMP_WMB();
10249         wake_up(&dhd->dhd_bus_busy_state_wait);
10250         return 0;
10251 }
10252
10253 void
10254 dhd_os_wd_timer_extend(void *bus, bool extend)
10255 {
10256         dhd_pub_t *pub = bus;
10257         dhd_info_t *dhd = (dhd_info_t *)pub->info;
10258
10259         if (extend)
10260                 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10261         else
10262                 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10263 }
10264
10265
/*
 * Start, re-arm, or stop the DHD watchdog timer.
 *
 * wdtick == 0 stops the timer (if running); wdtick != 0 (milliseconds)
 * re-arms it and updates dhd_watchdog_ms.  The timer is never started
 * while the bus is down.
 *
 * NOTE(review): when arming, an extra DHD_OS_WD_WAKE_LOCK reference is
 * taken (beyond the lock/unlock pair bracketing this function) — this
 * looks like a deliberate refcount held while the timer is active, with
 * the matching release on the stop path; confirm the pairing against
 * the wake-lock implementation.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_OS_WD_WAKE_LOCK(pub);
	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(pub, flags);
		/* drop the spinlock first: del_timer_sync() may sleep/spin
		 * waiting for a running callback to finish
		 */
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(pub);
}
10310
10311 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Start or stop the PCIe runtime-PM timer.
 *
 * tick != 0 starts the timer (only if not already running, using
 * dhd_runtimepm_ms as the period); tick == 0 stops it.  The timer is
 * never started while the bus is down or going down.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN ||
			pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			/* drop the spinlock before del_timer_sync(), which may
			 * wait for a running timer callback to complete
			 */
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;

}
10359
10360 #endif /* DHD_PCIE_RUNTIMEPM */
10361
10362 void *
10363 dhd_os_open_image(char *filename)
10364 {
10365         struct file *fp;
10366         int size;
10367
10368         fp = filp_open(filename, O_RDONLY, 0);
10369         /*
10370          * 2.6.11 (FC4) supports filp_open() but later revs don't?
10371          * Alternative:
10372          * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10373          * ???
10374          */
10375          if (IS_ERR(fp)) {
10376                  fp = NULL;
10377                  goto err;
10378          }
10379
10380          if (!S_ISREG(file_inode(fp)->i_mode)) {
10381                  DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10382                  fp = NULL;
10383                  goto err;
10384          }
10385
10386          size = i_size_read(file_inode(fp));
10387          if (size <= 0) {
10388                  DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10389                  fp = NULL;
10390                  goto err;
10391          }
10392
10393          DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10394
10395 err:
10396          return fp;
10397 }
10398
/*
 * Read up to 'len' bytes from the image file into 'buf', advancing the
 * file position.  Returns the number of bytes read, 0 if 'image' is
 * NULL, or -EIO when the caller asked for the whole remaining file but
 * a short read occurred.
 *
 * NOTE(review): uses the pre-4.14 kernel_read(fp, pos, buf, count)
 * calling convention — confirm against the target kernel version.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image)
		return 0;

	size = i_size_read(file_inode(fp));
	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));

	/* reading the full file must not come up short */
	if (len >= size && size != rdlen) {
		return -EIO;
	}

	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}
10421
10422 void
10423 dhd_os_close_image(void *image)
10424 {
10425         if (image)
10426                 filp_close((struct file *)image, NULL);
10427 }
10428
10429 void
10430 dhd_os_sdlock(dhd_pub_t *pub)
10431 {
10432         dhd_info_t *dhd;
10433
10434         dhd = (dhd_info_t *)(pub->info);
10435
10436         if (dhd_dpc_prio >= 0)
10437                 down(&dhd->sdsem);
10438         else
10439                 spin_lock_bh(&dhd->sdlock);
10440 }
10441
10442 void
10443 dhd_os_sdunlock(dhd_pub_t *pub)
10444 {
10445         dhd_info_t *dhd;
10446
10447         dhd = (dhd_info_t *)(pub->info);
10448
10449         if (dhd_dpc_prio >= 0)
10450                 up(&dhd->sdsem);
10451         else
10452                 spin_unlock_bh(&dhd->sdlock);
10453 }
10454
10455 void
10456 dhd_os_sdlock_txq(dhd_pub_t *pub)
10457 {
10458         dhd_info_t *dhd;
10459
10460         dhd = (dhd_info_t *)(pub->info);
10461         spin_lock_bh(&dhd->txqlock);
10462 }
10463
10464 void
10465 dhd_os_sdunlock_txq(dhd_pub_t *pub)
10466 {
10467         dhd_info_t *dhd;
10468
10469         dhd = (dhd_info_t *)(pub->info);
10470         spin_unlock_bh(&dhd->txqlock);
10471 }
10472
/* Intentionally a no-op: RX-queue lock placeholder for this bus layer. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
10477
/* Intentionally a no-op: counterpart of dhd_os_sdlock_rxq(). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
10482
10483 static void
10484 dhd_os_rxflock(dhd_pub_t *pub)
10485 {
10486         dhd_info_t *dhd;
10487
10488         dhd = (dhd_info_t *)(pub->info);
10489         spin_lock_bh(&dhd->rxf_lock);
10490
10491 }
10492
10493 static void
10494 dhd_os_rxfunlock(dhd_pub_t *pub)
10495 {
10496         dhd_info_t *dhd;
10497
10498         dhd = (dhd_info_t *)(pub->info);
10499         spin_unlock_bh(&dhd->rxf_lock);
10500 }
10501
10502 #ifdef DHDTCPACK_SUPPRESS
/*
 * Acquire the TCP-ACK-suppression lock.  SDIO builds use a BH spinlock
 * (the returned flags value is always 0 and unused); other buses use an
 * irqsave spinlock and return the saved flags, which the caller must
 * pass back to dhd_os_tcpackunlock().
 */
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}
10521
10522 void
10523 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
10524 {
10525         dhd_info_t *dhd;
10526
10527 #ifdef BCMSDIO
10528         BCM_REFERENCE(flags);
10529 #endif /* BCMSDIO */
10530
10531         dhd = (dhd_info_t *)(pub->info);
10532
10533         if (dhd) {
10534 #ifdef BCMSDIO
10535                 spin_lock_bh(&dhd->tcpack_lock);
10536 #else
10537                 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
10538 #endif /* BCMSDIO */
10539         }
10540 }
10541 #endif /* DHDTCPACK_SUPPRESS */
10542
10543 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10544 {
10545         uint8* buf;
10546         gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10547
10548         buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10549         if (buf == NULL && kmalloc_if_fail)
10550                 buf = kmalloc(size, flags);
10551
10552         return buf;
10553 }
10554
/* Counterpart of dhd_os_prealloc(); intentionally empty because pool
 * buffers stay owned by the platform layer.  NOTE(review): kmalloc
 * fallbacks from dhd_os_prealloc() are not freed here — confirm they
 * are released elsewhere.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
10558
10559 #if defined(WL_WIRELESS_EXT)
10560 struct iw_statistics *
10561 dhd_get_wireless_stats(struct net_device *dev)
10562 {
10563         int res = 0;
10564         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10565
10566         if (!dhd->pub.up) {
10567                 return NULL;
10568         }
10569
10570         res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
10571
10572         if (res == 0)
10573                 return &dhd->iw.wstats;
10574         else
10575                 return NULL;
10576 }
10577 #endif /* defined(WL_WIRELESS_EXT) */
10578
/*
 * Parse a host event from the dongle and dispatch it to the wireless
 * extensions layer (primary bsscfg only) and/or cfg80211.  On success
 * *ifidx/event/data are filled in by wl_host_event().  Returns BCME_OK
 * or the wl_host_event() error code.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
	ASSERT(dhd != NULL);

#ifdef SHOW_LOGTRACE
		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);

		/* runtime NULL check as well: ASSERT compiles out in release */
		if (dhd->iflist[*ifidx]->net) {
		wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT)  */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
10619
10620 /* send up locally generated event */
/* send up locally generated event */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	/* Placeholder: no locally generated event types are handled in
	 * this build; the switch keeps the dispatch shape for future use.
	 */
	switch (ntoh32(event->event_type)) {

	default:
		break;
	}
}
10630
10631 #ifdef LOG_INTO_TCPDUMP
/*
 * Wrap 'data' (data_len bytes) in a Broadcom-type Ethernet frame
 * addressed to ourselves and inject it into the network stack on the
 * "wlan0" interface (falling back to interface 0), so the log payload
 * can be captured with tcpdump.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* src = our MAC with the locally-administered bit toggled,
		 * dst = our MAC, so the frame stays local
		 */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			 ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		/* eth_type_trans() moves skb->data; restore the saved
		 * pointers afterwards so the full frame is preserved
		 */
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
10689 #endif /* LOG_INTO_TCPDUMP */
10690
/*
 * SDIO only: sleep (up to IOCTL_RESP_TIMEOUT) until *lockvar goes
 * FALSE, releasing the SD bus lock across the wait so the event path
 * can clear the flag.  No-op on non-SDIO builds.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
10708
10709 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
10710 {
10711 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10712         struct dhd_info *dhdinfo =  dhd->info;
10713         if (waitqueue_active(&dhdinfo->ctrl_wait))
10714                 wake_up(&dhdinfo->ctrl_wait);
10715 #endif
10716         return;
10717 }
10718
10719 #if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * Reset the bus/dongle.  flag == TRUE brings the device down (after a
 * best-effort "wl down" and tearing down wlfc/PNO state); flag == FALSE
 * brings it back up (refreshing firmware/nvram paths first on SDIO).
 * Returns 0 on success or the dhd_bus_devreset() error code.
 */
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			/* failure is non-fatal: continue with the reset */
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled)
			dhd_wlfc_deinit(&dhd->pub);
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
	if (dhd->pub.pno_state)
		dhd_pno_deinit(&dhd->pub);
#endif
	}

#ifdef BCMSDIO
	if (!flag) {
		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path, dhd->conf_path);
	}
#endif /* BCMSDIO */

	ret = dhd_bus_devreset(&dhd->pub, flag);
	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
		return ret;
	}

	return ret;
}
10759
10760 #ifdef BCMSDIO
10761 int
10762 dhd_net_bus_suspend(struct net_device *dev)
10763 {
10764         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10765         return dhd_bus_suspend(&dhd->pub);
10766 }
10767
10768 int
10769 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10770 {
10771         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10772         return dhd_bus_resume(&dhd->pub, stage);
10773 }
10774
10775 #endif /* BCMSDIO */
10776 #endif /* BCMSDIO || BCMPCIE */
10777
10778 int net_os_set_suspend_disable(struct net_device *dev, int val)
10779 {
10780         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10781         int ret = 0;
10782
10783         if (dhd) {
10784                 ret = dhd->pub.suspend_disable_flag;
10785                 dhd->pub.suspend_disable_flag = val;
10786         }
10787         return ret;
10788 }
10789
/*
 * Enter (val != 0) or leave suspend-optimized operation for the device,
 * then let cfg80211 refresh its power-mode view.  'force' is only used
 * by the helper path (no early-suspend builds).  Returns the helper's
 * result, or 0 when no dhd info is attached.
 */
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
#ifdef CONFIG_MACH_UNIVERSAL7420
#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif
	}
	return ret;
}
10809
10810 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10811 {
10812         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10813
10814         if (dhd)
10815                 dhd->pub.suspend_bcn_li_dtim = val;
10816
10817         return 0;
10818 }
10819
10820 #ifdef PKT_FILTER_SUPPORT
/*
 * Install (add_remove != 0) or remove one of the predefined RX packet
 * filters ('num' selects broadcast / IPv4 multicast / IPv6 multicast /
 * mDNS).  In non-master mode the sense of add_remove is inverted.
 * Returns 0 on success, -EINVAL for an unknown or out-of-range filter.
 * Compiled out (always 0) for GAN lite NAT-keepalive builds.
 */
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
	return 0;
#else
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	char *filterp = NULL;
	int filter_id = 0;
	int ret = 0;

	if (!dhd_master_mode)
		add_remove = !add_remove;
	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
	/* the unicast filter slot is managed elsewhere; ignore it here */
	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
		return ret;
	if (num >= dhd->pub.pktfilter_count)
		return -EINVAL;
	/* each entry: "<id> <offset> <polarity> <mask> <pattern>" */
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			filter_id = 102;
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filterp = "103 0 0 0 0xFFFF 0x3333";
			filter_id = 103;
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhd->pub.pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
	} else { /* Delete filter */
		if (dhd->pub.pktfilter[num] != NULL) {
			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
			dhd->pub.pktfilter[num] = NULL;
		}
	}
	return ret;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
}
10872
10873 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
10874
10875 {
10876         int ret = 0;
10877
10878         /* Packet filtering is set only if we still in early-suspend and
10879          * we need either to turn it ON or turn it OFF
10880          * We can always turn it OFF in case of early-suspend, but we turn it
10881          * back ON only if suspend_disable_flag was not set
10882         */
10883         if (dhdp && dhdp->up) {
10884                 if (dhdp->in_suspend) {
10885                         if (!val || (val && !dhdp->suspend_disable_flag))
10886                                 dhd_enable_packet_filter(val, dhdp);
10887                 }
10888         }
10889         return ret;
10890 }
10891
10892 /* function to enable/disable packet for Network device */
10893 int net_os_enable_packet_filter(struct net_device *dev, int val)
10894 {
10895         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10896
10897         DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
10898         return dhd_os_enable_packet_filter(&dhd->pub, val);
10899 }
10900 #endif /* PKT_FILTER_SUPPORT */
10901
10902 int
10903 dhd_dev_init_ioctl(struct net_device *dev)
10904 {
10905         dhd_info_t *dhd = DHD_DEV_INFO(dev);
10906         int ret;
10907
10908         if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
10909                 goto done;
10910
10911 done:
10912         return ret;
10913 }
10914
/*
 * Build the WIFI_FEATURE_* capability bitmask advertised to the Android
 * HAL, derived from firmware capability flags (FW_SUPPORTED) plus
 * compile-time support options.  Returns 0 if the dhd pub is missing.
 */
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */
	if (!dhd)
		return feature_set;

	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essentail for d2d rtt */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	feature_set |= WIFI_FEATURE_D2AP_RTT;
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */
	/* Supports STA + STA always */
	feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
#ifdef PNO_SUPPORT
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
		feature_set |= WIFI_FEATURE_BATCH_SCAN;
#ifdef GSCAN_SUPPORT
		feature_set |= WIFI_FEATURE_GSCAN;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
	return feature_set;
}
10971
10972
10973 int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
10974 {
10975         int feature_set_full, mem_needed;
10976         int *ret;
10977
10978         *num = 0;
10979         mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10980         ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
10981         if (!ret) {
10982                 DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
10983                         mem_needed));
10984                 return ret;
10985         }
10986
10987         feature_set_full = dhd_dev_get_feature_set(dev);
10988
10989         ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
10990                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10991                  (feature_set_full & WIFI_FEATURE_NAN) |
10992                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10993                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10994                  (feature_set_full & WIFI_FEATURE_PNO) |
10995                  (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
10996                  (feature_set_full & WIFI_FEATURE_GSCAN) |
10997                  (feature_set_full & WIFI_FEATURE_HOTSPOT) |
10998                  (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
10999                  (feature_set_full & WIFI_FEATURE_EPR);
11000
11001         ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
11002                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
11003                  /* Not yet verified NAN with P2P */
11004                  /* (feature_set_full & WIFI_FEATURE_NAN) | */
11005                  (feature_set_full & WIFI_FEATURE_P2P) |
11006                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
11007                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
11008                  (feature_set_full & WIFI_FEATURE_EPR);
11009
11010         ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
11011                  (feature_set_full & WIFI_FEATURE_INFRA_5G) |
11012                  (feature_set_full & WIFI_FEATURE_NAN) |
11013                  (feature_set_full & WIFI_FEATURE_D2D_RTT) |
11014                  (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
11015                  (feature_set_full & WIFI_FEATURE_TDLS) |
11016                  (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
11017                  (feature_set_full & WIFI_FEATURE_EPR);
11018         *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
11019
11020         return ret;
11021 }
11022 #ifdef CUSTOM_FORCE_NODFS_FLAG
11023 int
11024 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
11025 {
11026         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11027
11028         if (nodfs)
11029                 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
11030         else
11031                 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
11032         dhd->pub.force_country_change = TRUE;
11033         return 0;
11034 }
11035 #endif /* CUSTOM_FORCE_NODFS_FLAG */
11036 #ifdef PNO_SUPPORT
11037 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
11038 int
11039 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
11040 {
11041         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11042
11043         return (dhd_pno_stop_for_ssid(&dhd->pub));
11044 }
11045 /* Linux wrapper to call common dhd_pno_set_for_ssid */
11046 int
11047 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
11048         uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
11049 {
11050         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11051
11052         return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
11053                 pno_repeat, pno_freq_expo_max, channel_list, nchan));
11054 }
11055
11056 /* Linux wrapper to call common dhd_pno_enable */
11057 int
11058 dhd_dev_pno_enable(struct net_device *dev, int enable)
11059 {
11060         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11061
11062         return (dhd_pno_enable(&dhd->pub, enable));
11063 }
11064
11065 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
11066 int
11067 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
11068         struct dhd_pno_hotlist_params *hotlist_params)
11069 {
11070         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11071         return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
11072 }
11073 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
11074 int
11075 dhd_dev_pno_stop_for_batch(struct net_device *dev)
11076 {
11077         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11078         return (dhd_pno_stop_for_batch(&dhd->pub));
11079 }
11080 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
11081 int
11082 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
11083 {
11084         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11085         return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
11086 }
11087 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
11088 int
11089 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
11090 {
11091         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11092         return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
11093 }
11094 /* Linux wrapper to call common dhd_pno_set_mac_oui */
11095 int
11096 dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
11097 {
11098         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11099         return (dhd_pno_set_mac_oui(&dhd->pub, oui));
11100 }
11101 #endif /* PNO_SUPPORT */
11102
11103 #if defined(PNO_SUPPORT)
11104 #ifdef GSCAN_SUPPORT
11105 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11106 int
11107 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11108  void *buf, uint8 flush)
11109 {
11110         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11111
11112         return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
11113 }
11114
11115 /* Linux wrapper to call common dhd_pno_get_gscan */
11116 void *
11117 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11118                       void *info, uint32 *len)
11119 {
11120         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11121
11122         return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
11123 }
11124
11125 /* Linux wrapper to call common dhd_wait_batch_results_complete */
11126 void
11127 dhd_dev_wait_batch_results_complete(struct net_device *dev)
11128 {
11129         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11130
11131         return (dhd_wait_batch_results_complete(&dhd->pub));
11132 }
11133
11134 /* Linux wrapper to call common dhd_pno_lock_batch_results */
11135 void
11136 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
11137 {
11138         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11139
11140         return (dhd_pno_lock_batch_results(&dhd->pub));
11141 }
11142 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
11143 void
11144 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
11145 {
11146         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11147
11148         return (dhd_pno_unlock_batch_results(&dhd->pub));
11149 }
11150
11151 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
11152 int
11153 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
11154 {
11155         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11156
11157         return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
11158 }
11159
11160 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
11161 int
11162 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
11163 {
11164         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11165
11166         return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
11167 }
11168
11169 /* Linux wrapper to call common dhd_handle_swc_evt */
11170 void *
11171 dhd_dev_swc_scan_event(struct net_device *dev, const void  *data, int *send_evt_bytes)
11172 {
11173         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11174
11175         return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
11176 }
11177
11178 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
11179 void *
11180 dhd_dev_hotlist_scan_event(struct net_device *dev,
11181       const void  *data, int *send_evt_bytes, hotlist_type_t type)
11182 {
11183         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11184
11185         return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
11186 }
11187
11188 /* Linux wrapper to call common dhd_process_full_gscan_result */
11189 void *
11190 dhd_dev_process_full_gscan_result(struct net_device *dev,
11191 const void  *data, int *send_evt_bytes)
11192 {
11193         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11194
11195         return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
11196 }
11197
11198 void
11199 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
11200 {
11201         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11202
11203         dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
11204
11205         return;
11206 }
11207
11208 int
11209 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
11210 {
11211         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11212
11213         return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
11214 }
11215
11216 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
11217 int
11218 dhd_dev_retrieve_batch_scan(struct net_device *dev)
11219 {
11220         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11221
11222         return (dhd_retreive_batch_scan_results(&dhd->pub));
11223 }
11224 #endif /* GSCAN_SUPPORT */
11225 #endif 
11226 #ifdef RTT_SUPPORT
11227 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11228 int
11229 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
11230 {
11231         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11232
11233         return (dhd_rtt_set_cfg(&dhd->pub, buf));
11234 }
11235 int
11236 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
11237 {
11238         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11239
11240         return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
11241 }
11242 int
11243 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
11244 {
11245         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11246
11247         return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
11248 }
11249 int
11250 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
11251 {
11252         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11253
11254         return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
11255 }
11256
11257 int
11258 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
11259 {
11260         dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11261
11262         return (dhd_rtt_capability(&dhd->pub, capa));
11263 }
11264
11265 #endif /* RTT_SUPPORT */
11266
11267 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
/*
 * Deferred-work handler that reports a firmware/bus HANG to user space
 * on the primary interface (via WEXT private event and/or cfg80211).
 * Scheduled from dhd_os_send_hang_message().
 */
static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
	dhd_info_t *dhd;
	struct net_device *dev;

	/* NOTE(review): dhd_info is assumed to be the dhd_info_t * the
	 * deferred workqueue was created with (not the per-event data in
	 * event_info) -- confirm against the dhd_deferred_work dispatcher. */
	dhd = (dhd_info_t *)dhd_info;
	/* iflist[0] itself is not NULL-checked here; presumably the primary
	 * interface always exists while the workqueue is alive -- verify. */
	dev = dhd->iflist[0]->net;

	if (dev) {
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
	}
}
11285
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
/* Global dhd_pub handle used by the PCIe link-recovery path (set elsewhere) */
extern dhd_pub_t *link_recovery;
/*
 * Exported hook for the host PCIe layer: mark the bus as link-down and
 * push a HANG (reason PCIE_LINK_DOWN) up to the framework so it can
 * trigger recovery.
 */
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11297
11298 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11299 {
11300         int ret = 0;
11301         if (dhdp) {
11302                 if (!dhdp->hang_was_sent) {
11303                         dhdp->hang_was_sent = 1;
11304                         dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11305                                 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11306                         DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d s=%d\n", __FUNCTION__,
11307                                 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11308                 }
11309         }
11310         return ret;
11311 }
11312
11313 int net_os_send_hang_message(struct net_device *dev)
11314 {
11315         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11316         int ret = 0;
11317
11318         if (dhd) {
11319                 /* Report FW problem when enabled */
11320                 if (dhd->pub.hang_report) {
11321 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11322                         ret = dhd_os_send_hang_message(&dhd->pub);
11323 #else
11324                         ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11325 #endif
11326                 } else {
11327                         DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
11328                                 __FUNCTION__));
11329                         /* Enforce bus down to stop any future traffic */
11330                         dhd->pub.busstate = DHD_BUS_DOWN;
11331                 }
11332         }
11333         return ret;
11334 }
11335
11336 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11337 {
11338         dhd_info_t *dhd = NULL;
11339         dhd_pub_t *dhdp = NULL;
11340         int reason;
11341
11342         dhd = DHD_DEV_INFO(dev);
11343         if (dhd) {
11344                 dhdp = &dhd->pub;
11345         }
11346
11347         if (!dhd || !dhdp) {
11348                 return 0;
11349         }
11350
11351         reason = bcm_strtoul(string_num, NULL, 0);
11352         DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11353
11354         if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11355                 reason = 0;
11356         }
11357
11358         dhdp->hang_reason = reason;
11359
11360         return net_os_send_hang_message(dev);
11361 }
11362 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
11363
11364
11365 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11366 {
11367         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11368         return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11369 }
11370
11371 bool dhd_force_country_change(struct net_device *dev)
11372 {
11373         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11374
11375         if (dhd && dhd->pub.up)
11376                 return dhd->pub.force_country_change;
11377         return FALSE;
11378 }
/*
 * Fill @cspec with the platform-customized country spec for
 * @country_iso_code, using the adapter's translation table
 * (see get_customized_country_code).
 */
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef CUSTOM_COUNTRY_CODE
	/* Variant that additionally consults the driver's country flags */
	get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
	get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
}
11390 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
11391 {
11392         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11393         if (dhd && dhd->pub.up) {
11394                 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
11395 #ifdef WL_CFG80211
11396                 wl_update_wiphybands(NULL, notify);
11397 #endif
11398         }
11399 }
11400
11401 void dhd_bus_band_set(struct net_device *dev, uint band)
11402 {
11403         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11404         if (dhd && dhd->pub.up) {
11405 #ifdef WL_CFG80211
11406                 wl_update_wiphybands(NULL, true);
11407 #endif
11408         }
11409 }
11410
11411 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11412 {
11413         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11414
11415         if (!fw || fw[0] == '\0')
11416                 return -EINVAL;
11417
11418         strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11419         dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11420
11421 #if defined(SOFTAP)
11422         if (strstr(fw, "apsta") != NULL) {
11423                 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11424                 ap_fw_loaded = TRUE;
11425         } else {
11426                 DHD_INFO(("GOT STA FIRMWARE\n"));
11427                 ap_fw_loaded = FALSE;
11428         }
11429 #endif 
11430         return 0;
11431 }
11432
/* Public wrapper: take the per-device network-interface mutex */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
11438
/* Public wrapper: release the per-device network-interface mutex */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
11444
11445 static void dhd_net_if_lock_local(dhd_info_t *dhd)
11446 {
11447 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11448         if (dhd)
11449                 mutex_lock(&dhd->dhd_net_if_mutex);
11450 #endif
11451 }
11452
11453 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
11454 {
11455 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11456         if (dhd)
11457                 mutex_unlock(&dhd->dhd_net_if_mutex);
11458 #endif
11459 }
11460
11461 static void dhd_suspend_lock(dhd_pub_t *pub)
11462 {
11463 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11464         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11465         if (dhd)
11466                 mutex_lock(&dhd->dhd_suspend_mutex);
11467 #endif
11468 }
11469
11470 static void dhd_suspend_unlock(dhd_pub_t *pub)
11471 {
11472 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11473         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11474         if (dhd)
11475                 mutex_unlock(&dhd->dhd_suspend_mutex);
11476 #endif
11477 }
11478
11479 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11480 {
11481         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11482         unsigned long flags = 0;
11483
11484         if (dhd)
11485                 spin_lock_irqsave(&dhd->dhd_lock, flags);
11486
11487         return flags;
11488 }
11489
11490 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11491 {
11492         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11493
11494         if (dhd)
11495                 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11496 }
11497
11498 /* Linux specific multipurpose spinlock API */
11499 void *
11500 dhd_os_spin_lock_init(osl_t *osh)
11501 {
11502         /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
11503         /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
11504         /* and this results in kernel asserts in internal builds */
11505         spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
11506         if (lock)
11507                 spin_lock_init(lock);
11508         return ((void *)lock);
11509 }
11510 void
11511 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11512 {
11513         if (lock)
11514                 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11515 }
11516 unsigned long
11517 dhd_os_spin_lock(void *lock)
11518 {
11519         unsigned long flags = 0;
11520
11521         if (lock)
11522                 spin_lock_irqsave((spinlock_t *)lock, flags);
11523
11524         return flags;
11525 }
11526 void
11527 dhd_os_spin_unlock(void *lock, unsigned long flags)
11528 {
11529         if (lock)
11530                 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11531 }
11532
11533 static int
11534 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11535 {
11536         return (atomic_read(&dhd->pend_8021x_cnt));
11537 }
11538
11539 #define MAX_WAIT_FOR_8021X_TX   100
11540
11541 int
11542 dhd_wait_pend8021x(struct net_device *dev)
11543 {
11544         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11545         int timeout = msecs_to_jiffies(10);
11546         int ntimes = MAX_WAIT_FOR_8021X_TX;
11547         int pend = dhd_get_pend_8021x_cnt(dhd);
11548
11549         while (ntimes && pend) {
11550                 if (pend) {
11551                         set_current_state(TASK_INTERRUPTIBLE);
11552                         DHD_PERIM_UNLOCK(&dhd->pub);
11553                         schedule_timeout(timeout);
11554                         DHD_PERIM_LOCK(&dhd->pub);
11555                         set_current_state(TASK_RUNNING);
11556                         ntimes--;
11557                 }
11558                 pend = dhd_get_pend_8021x_cnt(dhd);
11559         }
11560         if (ntimes == 0)
11561         {
11562                 atomic_set(&dhd->pend_8021x_cnt, 0);
11563                 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11564         }
11565         return pend;
11566 }
11567
11568 #ifdef DHD_DEBUG
11569 static void
11570 dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11571 {
11572         char *type_str = NULL;
11573
11574         switch (type) {
11575                 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11576                         type_str = "resumed_on_timeout";
11577                         break;
11578                 case DUMP_TYPE_D3_ACK_TIMEOUT:
11579                         type_str = "D3_ACK_timeout";
11580                         break;
11581                 case DUMP_TYPE_DONGLE_TRAP:
11582                         type_str = "Dongle_Trap";
11583                         break;
11584                 case DUMP_TYPE_MEMORY_CORRUPTION:
11585                         type_str = "Memory_Corruption";
11586                         break;
11587                 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11588                         type_str = "PKTID_AUDIT_Fail";
11589                         break;
11590                 case DUMP_TYPE_SCAN_TIMEOUT:
11591                         type_str = "SCAN_timeout";
11592                         break;
11593                 case DUMP_TYPE_SCAN_BUSY:
11594                         type_str = "SCAN_Busy";
11595                         break;
11596                 case DUMP_TYPE_BY_SYSDUMP:
11597                         type_str = "BY_SYSDUMP";
11598                         break;
11599                 case DUMP_TYPE_BY_LIVELOCK:
11600                         type_str = "BY_LIVELOCK";
11601                         break;
11602                 case DUMP_TYPE_AP_LINKUP_FAILURE:
11603                         type_str = "BY_AP_LINK_FAILURE";
11604                         break;
11605                 default:
11606                         type_str = "Unknown_type";
11607                         break;
11608         }
11609
11610         strncpy(buf, type_str, strlen(type_str));
11611         buf[strlen(type_str)] = 0;
11612 }
11613
/*
 * Dump @size bytes of SOCRAM image @buf to a file whose name encodes the
 * memdump type and current time. Always consumes (frees) @buf, whether or
 * not the write succeeds. Returns 0 on success or a negative errno from
 * filp_open.
 * NOTE(review): the fp->f_op->write return value is not checked, so a
 * short or failed write still reports success -- confirm acceptable.
 */
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char memdump_path[128];
	char memdump_type[32];
	struct timeval curtime;
	uint32 file_mode;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Init file name */
	memset(memdump_path, 0, sizeof(memdump_path));
	memset(memdump_type, 0, sizeof(memdump_type));
	do_gettimeofday(&curtime);
	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
#ifdef CUSTOMER_HW4_DEBUG
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2)
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/data/misc/wifi/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#else
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/installmedia/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
	 * calling BUG_ON immediately after collecting the socram dump.
	 * So the file write operation should directly write the contents into the
	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
	 * instead of appending.
	 */
	file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
#endif /* CUSTOMER_HW4_DEBUG */

	/* print SOCRAM dump file path */
	DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));

	/* open file to write */
	fp = filp_open(memdump_path, file_mode, 0644);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
		goto exit;
	}

	/* Write buf to file */
	fp->f_op->write(fp, buf, size, &pos);

exit:
	/* close file before return; ret == 0 implies filp_open succeeded,
	 * so fp is valid exactly when we close it */
	if (!ret)
		filp_close(fp, current->files);

	/* restore previous address limit */
	set_fs(old_fs);

	/* free buf before return -- this function owns @buf on all paths */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_OS_PREFREE(dhd, buf, size);
#else
	MFREE(dhd->osh, buf, size);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	return ret;
}
11689 #endif /* DHD_DEBUG */
11690
11691 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
11692 {
11693         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11694         unsigned long flags;
11695         int ret = 0;
11696
11697         if (dhd) {
11698                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11699                 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
11700                         dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
11701 #ifdef CONFIG_HAS_WAKELOCK
11702                 if (dhd->wakelock_rx_timeout_enable)
11703                         wake_lock_timeout(&dhd->wl_rxwake,
11704                                 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
11705                 if (dhd->wakelock_ctrl_timeout_enable)
11706                         wake_lock_timeout(&dhd->wl_ctrlwake,
11707                                 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
11708 #endif
11709                 dhd->wakelock_rx_timeout_enable = 0;
11710                 dhd->wakelock_ctrl_timeout_enable = 0;
11711                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11712         }
11713         return ret;
11714 }
11715
11716 int net_os_wake_lock_timeout(struct net_device *dev)
11717 {
11718         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11719         int ret = 0;
11720
11721         if (dhd)
11722                 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11723         return ret;
11724 }
11725
11726 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11727 {
11728         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11729         unsigned long flags;
11730
11731         if (dhd) {
11732                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11733                 if (val > dhd->wakelock_rx_timeout_enable)
11734                         dhd->wakelock_rx_timeout_enable = val;
11735                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11736         }
11737         return 0;
11738 }
11739
11740 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11741 {
11742         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11743         unsigned long flags;
11744
11745         if (dhd) {
11746                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11747                 if (val > dhd->wakelock_ctrl_timeout_enable)
11748                         dhd->wakelock_ctrl_timeout_enable = val;
11749                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11750         }
11751         return 0;
11752 }
11753
11754 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
11755 {
11756         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11757         unsigned long flags;
11758
11759         if (dhd) {
11760                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11761                 dhd->wakelock_ctrl_timeout_enable = 0;
11762 #ifdef CONFIG_HAS_WAKELOCK
11763                 if (wake_lock_active(&dhd->wl_ctrlwake))
11764                         wake_unlock(&dhd->wl_ctrlwake);
11765 #endif
11766                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11767         }
11768         return 0;
11769 }
11770
11771 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11772 {
11773         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11774         int ret = 0;
11775
11776         if (dhd)
11777                 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11778         return ret;
11779 }
11780
11781 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11782 {
11783         dhd_info_t *dhd = DHD_DEV_INFO(dev);
11784         int ret = 0;
11785
11786         if (dhd)
11787                 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11788         return ret;
11789 }
11790
11791
11792 #if defined(DHD_TRACE_WAKE_LOCK)
11793 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11794 #include <linux/hashtable.h>
11795 #else
11796 #include <linux/hash.h>
11797 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11798
11799
/* Per-call-site wake-lock trace records, keyed by return address */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

/* Runtime switch: non-zero enables recording into wklock_history */
int trace_wklock_onoff = 1;
11809
/* Wake-lock operations distinguished by the trace facility */
typedef enum dhd_wklock_type {
	DHD_WAKE_LOCK,		/* wake lock taken */
	DHD_WAKE_UNLOCK,	/* wake lock released */
	DHD_WAIVE_LOCK,		/* wake-lock accounting waived */
	DHD_RESTORE_LOCK	/* wake-lock accounting restored */
} dhd_wklock_t;
11816
/* One wklock_history entry: wake-lock activity recorded per call site */
struct wk_trace_record {
	unsigned long addr;                 /* caller return address (hash key) */
	dhd_wklock_t lock_type;         /* which wake-lock operation was seen */
	unsigned long long counter;             /* op count, or wakelock_counter snapshot for waive/restore */
	struct hlist_node wklock_node;  /* membership in wklock_history */
};
11823
11824
/*
 * Look up the trace record for call-site address @addr in wklock_history.
 * Returns the matching record or NULL if none exists yet.
 * NOTE(review): callers appear to serialize access via wakelock_spinlock
 * (see STORE_WKLOCK_RECORD call sites) -- confirm before adding new callers.
 */
static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}
11842
/*
 * HASH_ADD(hashtable, node, key): insert @node into @hashtable under @key.
 * Kernels >= 3.7 use the hashtable.h API directly; older kernels hash the
 * key by hand into the fixed hlist_head array.
 * NOTE(review): the trailing ';' after "while (0)" makes an invocation
 * followed by ';' expand to two statements -- harmless at the current
 * brace-enclosed call sites, but fragile after an unbraced "if".
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
	do { \
		hash_add(hashtable, node, key); \
	} while (0);
#else
#define HASH_ADD(hashtable, node, key) \
	do { \
		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
		hlist_add_head(node, &hashtable[index]); \
	} while (0);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11855
/*
 * STORE_WKLOCK_RECORD(wklock_type): record one wake-lock operation for the
 * calling function (keyed by __builtin_return_address(0)). Lock/unlock
 * events bump a per-site counter; waive/restore events snapshot the
 * current dhd->wakelock_counter instead. Allocates the record on first
 * sight of a call site (GFP_ATOMIC; allocation failure is logged and the
 * event dropped). Expands references to a local "dhd" -- only usable
 * inside functions with a dhd_info_t *dhd in scope, under
 * wakelock_spinlock.
 */
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) {\
				printk("Can't allocate wk_trace_record \n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0);
11884
/*
 * Dump every recorded wake-lock trace entry to the error log, one line
 * per call site, labeled by operation type.
 * NOTE(review): iterates wklock_history without taking wakelock_spinlock
 * here -- presumably the caller holds it; confirm at call sites.
 */
static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
		{
			switch (wklock_info->lock_type) {
				case DHD_WAKE_LOCK:
					DHD_ERROR(("wakelock lock : %pS  lock_counter : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_WAKE_UNLOCK:
					DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_WAIVE_LOCK:
					DHD_ERROR(("wakelock waive : %pS  before_waive : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
				case DHD_RESTORE_LOCK:
					DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
						(void *)wklock_info->addr, wklock_info->counter));
					break;
			}
		}
}
11919
/*
 * Initialize the wake-lock trace hash table (empty buckets), under
 * wakelock_spinlock so concurrent recorders see a consistent table.
 */
static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
11936
/* Free every entry of the wakelock trace table under the wakelock spinlock.
 * The _safe iterator variants are required because entries are unlinked and
 * kfree'd while walking.  Pre-3.7 kernels lack hash_for_each_safe(), so the
 * buckets are walked manually there.
 */
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
		{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
			hash_del(&wklock_info->wklock_node);
#else
			hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
			kfree(wklock_info);
		}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
11966
/* Dump the recorded wakelock lock/unlock history to the kernel log.
 * The wakelock spinlock is held across the table walk so entries cannot
 * be added or removed mid-dump.
 */
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	unsigned long flags;

	DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
}
11978 #else
11979 #define STORE_WKLOCK_RECORD(wklock_type)
11980 #endif /* ! DHD_TRACE_WAKE_LOCK */
11981
/* Take the driver's main wakelock (or pm_stay_awake on SDIO-only builds).
 * The underlying OS lock is grabbed only on the 0 -> 1 counter transition,
 * and only while locks are not waived (see dhd_os_wake_lock_waive()).
 * Returns the new counter value, or 0 if pub has no dhd_info attached.
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}
12009
/* Take the event wakelock (keeps the host awake while firmware events are
 * processed).  The OS lock is grabbed only on the 0 -> 1 transition; the
 * event counter has no waive mechanism and uses its own spinlock.
 * Returns the new counter value, or 0 if pub has no dhd_info attached.
 */
int dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
		if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
		dhd->wakelock_event_counter++;
		ret = dhd->wakelock_event_counter;
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}

	return ret;
}
12032
12033 int net_os_wake_lock(struct net_device *dev)
12034 {
12035         dhd_info_t *dhd = DHD_DEV_INFO(dev);
12036         int ret = 0;
12037
12038         if (dhd)
12039                 ret = dhd_os_wake_lock(&dhd->pub);
12040         return ret;
12041 }
12042
/* Drop one reference on the main wakelock.  dhd_os_wake_lock_timeout() is
 * invoked first (defined elsewhere — presumably arms the rx/ctrl timeout
 * wakelocks so the host stays awake briefly after release; confirm there).
 * The OS lock itself is released only on the 1 -> 0 transition and only
 * while locks are not waived.  Returns the remaining counter value.
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12073
/* Drop one reference on the event wakelock; the OS lock is released on the
 * 1 -> 0 transition.  Returns the remaining event counter value (0 if pub
 * has no dhd_info attached or the counter was already 0).
 */
int dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);

		if (dhd->wakelock_event_counter > 0) {
			dhd->wakelock_event_counter--;
			if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_event_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}
	return ret;
}
12098
/* Report whether the driver currently holds a lock that should block system
 * suspend.  With CONFIG_HAS_WAKELOCK this checks the main and watchdog
 * wakelocks; on newer SDIO builds it checks the counter together with the
 * bus PM state.  Returns 1 when suspend should be avoided, otherwise 0.
 * Note: the counters are read without the spinlock — a momentarily stale
 * answer is acceptable for this advisory check.
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
12121
/* Like dhd_os_check_wakelock(), but inspects every individual wakelock
 * (main, watchdog, rx, ctrl, optional OOB-interrupt and scan, event) and
 * logs which ones are active.  Returns 1 when any lock should block
 * suspend, otherwise 0.
 */
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	int l1, l2, l3, l4, l7;
	int l5 = 0, l6 = 0;
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l7 = wake_lock_active(&dhd->wl_evtwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);

	/* Indicate to the Host to avoid going to suspend if internal locks are up */
	/* (the 'dhd &&' below is redundant — dhd was null-checked above) */
	if (dhd && lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* CONFIG_HAS_WAKELOCK */
	return 0;
}
12172
12173 int net_os_wake_unlock(struct net_device *dev)
12174 {
12175         dhd_info_t *dhd = DHD_DEV_INFO(dev);
12176         int ret = 0;
12177
12178         if (dhd)
12179                 ret = dhd_os_wake_unlock(&dhd->pub);
12180         return ret;
12181 }
12182
/* Take the watchdog wakelock.  Unlike the main lock there is no waive
 * mechanism: the OS lock is grabbed whenever the watchdog counter was 0.
 * Returns the new counter value, or 0 if pub has no dhd_info attached.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12202
/* Release the watchdog wakelock.  Note this is a single-shot release: the
 * counter is reset to 0 (not decremented), so one unlock drops the lock no
 * matter how many dhd_os_wd_wake_lock() calls preceded it.  Always returns
 * 0 (ret is never assigned a non-zero value).
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12221
12222 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the PCIe out-of-band host-wake interrupt wakelock for 'val'
 * milliseconds (no-op without CONFIG_HAS_WAKELOCK).
 */
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12234
/* Release the PCIe out-of-band host-wake interrupt wakelock early, before
 * its timeout expires (no-op without CONFIG_HAS_WAKELOCK).
 */
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12249 #endif /* BCMPCIE_OOB_HOST_WAKE */
12250
12251 #ifdef DHD_USE_SCAN_WAKELOCK
/* Hold the scan wakelock for 'val' milliseconds so the host stays awake
 * while a scan completes (no-op without CONFIG_HAS_WAKELOCK).
 */
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12263
/* Release the scan wakelock early, before its timeout expires (no-op
 * without CONFIG_HAS_WAKELOCK).
 */
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12278 #endif /* DHD_USE_SCAN_WAKELOCK */
12279
12280 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
12281  * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
12282  */
12283 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
12284 {
12285         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12286         unsigned long flags;
12287         int ret = 0;
12288
12289         if (dhd) {
12290                 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12291
12292                 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12293                 if (dhd->waive_wakelock == FALSE) {
12294 #ifdef DHD_TRACE_WAKE_LOCK
12295                         if (trace_wklock_onoff) {
12296                                 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
12297                         }
12298 #endif /* DHD_TRACE_WAKE_LOCK */
12299                         /* record current lock status */
12300                         dhd->wakelock_before_waive = dhd->wakelock_counter;
12301                         dhd->waive_wakelock = TRUE;
12302                 }
12303                 ret = dhd->wakelock_wd_counter;
12304                 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12305         }
12306         return ret;
12307 }
12308
12309 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
12310 {
12311         dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12312         unsigned long flags;
12313         int ret = 0;
12314
12315         if (!dhd)
12316                 return 0;
12317
12318         spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12319
12320         /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12321         if (!dhd->waive_wakelock)
12322                 goto exit;
12323
12324         dhd->waive_wakelock = FALSE;
12325         /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
12326          * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
12327          * the lock in between, do the same by calling wake_unlock or pm_relax
12328          */
12329 #ifdef DHD_TRACE_WAKE_LOCK
12330         if (trace_wklock_onoff) {
12331                 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
12332         }
12333 #endif /* DHD_TRACE_WAKE_LOCK */
12334
12335         if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
12336 #ifdef CONFIG_HAS_WAKELOCK
12337                 wake_lock(&dhd->wl_wifi);
12338 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12339                 dhd_bus_dev_pm_stay_awake(&dhd->pub);
12340 #endif
12341         } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
12342 #ifdef CONFIG_HAS_WAKELOCK
12343                 wake_unlock(&dhd->wl_wifi);
12344 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12345                 dhd_bus_dev_pm_relax(&dhd->pub);
12346 #endif
12347         }
12348         dhd->wakelock_before_waive = 0;
12349 exit:
12350         ret = dhd->wakelock_wd_counter;
12351         spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12352         return ret;
12353 }
12354
/* Initialise all wakelock counters and, when CONFIG_HAS_WAKELOCK, the
 * individual wakelock objects; also resets the trace table when
 * DHD_TRACE_WAKE_LOCK is enabled.
 * NOTE(review): wl_wdwake is not initialised here although other functions
 * in this file use it — presumably initialised elsewhere (watchdog setup);
 * confirm before relying on it.
 */
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
12378
/* Tear down everything set up by dhd_os_wake_lock_init(): reset the
 * counters and destroy the wakelock objects (CONFIG_HAS_WAKELOCK only),
 * and free the trace table (DHD_TRACE_WAKE_LOCK).
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	wake_lock_destroy(&dhd->wl_wifi);
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	/* Fix: trace deinit was nested inside CONFIG_HAS_WAKELOCK, while
	 * dhd_os_wake_lock_init() runs dhd_wk_lock_trace_init()
	 * unconditionally — the trace table leaked on builds with
	 * DHD_TRACE_WAKE_LOCK but without CONFIG_HAS_WAKELOCK.  Mirror the
	 * init path and deinit unconditionally.
	 */
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
12402
12403 bool dhd_os_check_if_up(dhd_pub_t *pub)
12404 {
12405         if (!pub)
12406                 return FALSE;
12407         return pub->up;
12408 }
12409
12410 /* function to collect firmware, chip id and chip version info */
12411 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12412 {
12413         int i;
12414
12415         i = snprintf(info_string, sizeof(info_string),
12416                 "  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
12417         printf("%s\n", info_string);
12418
12419         if (!dhdp)
12420                 return;
12421
12422         i = snprintf(&info_string[i], sizeof(info_string) - i,
12423                 "\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12424                 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12425 }
12426
/* Issue a wl ioctl on behalf of an in-driver caller.  Validates the
 * net_device and resolves its interface index, holds the DHD wakelock and
 * perimeter lock around the bus transaction, then checks the result for
 * firmware-hang indications.  Returns the ioctl status, or -EINVAL/-ENODEV
 * for bad arguments.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	/* side effect: may flag a hang / schedule recovery on error */
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
12459
12460 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
12461 {
12462         struct net_device *net;
12463
12464         net = dhd_idx2net(dhdp, ifidx);
12465         if (!net) {
12466                 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
12467                 return FALSE;
12468         }
12469
12470         return dhd_check_hang(net, dhdp, ret);
12471 }
12472
12473 /* Return instance */
12474 int dhd_get_instance(dhd_pub_t *dhdp)
12475 {
12476         return dhdp->info->unit;
12477 }
12478
12479
12480 #ifdef PROP_TXSTATUS
12481
/* Platform hook invoked when proptx flow control starts: on builds with
 * USE_DYNAMIC_F2_BLKSIZE, switch the SDIO function-2 block size to the
 * non-legacy value; otherwise a no-op.
 */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12489
/* Platform hook invoked when proptx flow control stops: restore the
 * default SDIO function-2 block size (sd_f2_blocksize) on builds with
 * USE_DYNAMIC_F2_BLKSIZE; otherwise a no-op.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12497
/* Decide whether host flow control should be skipped.  With
 * SKIP_WLFC_ON_CONCURRENT defined, flow control stays enabled only in
 * concurrent (VSDB) mode when cfg80211 is available, and is always skipped
 * otherwise; without the option flow control is never skipped.
 */
bool dhd_wlfc_skip_fc(void)
{
#ifdef SKIP_WLFC_ON_CONCURRENT
#ifdef WL_CFG80211

	/* enable flow control in vsdb mode */
	return !(wl_cfg80211_is_concurrent_mode());
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
}
12513 #endif /* PROP_TXSTATUS */
12514
12515 #ifdef BCMDBGFS
12516 #include <linux/debugfs.h>
12517
/* Bookkeeping for the debugfs "dhd/mem" register-access node. */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;	/* "dhd" directory under debugfs root */
	struct dentry	*debugfs_mem;	/* "mem" file inside debugfs_dir */
	dhd_pub_t	*dhdp;		/* bus handle used for register I/O */
	uint32		size;		/* size of the exposed address window */
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;	/* single global instance: one debugfs tree per driver */

/* register accessors implemented in the bus layer */
extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12529
/* debugfs open: stash the inode's private pointer for later read/write. */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
12536
12537 static ssize_t
12538 dhd_dbg_state_read(struct file *file, char __user *ubuf,
12539                        size_t count, loff_t *ppos)
12540 {
12541         ssize_t rval;
12542         uint32 tmp;
12543         loff_t pos = *ppos;
12544         size_t ret;
12545
12546         if (pos < 0)
12547                 return -EINVAL;
12548         if (pos >= g_dbgfs.size || !count)
12549                 return 0;
12550         if (count > g_dbgfs.size - pos)
12551                 count = g_dbgfs.size - pos;
12552
12553         /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
12554         tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
12555
12556         ret = copy_to_user(ubuf, &tmp, 4);
12557         if (ret == count)
12558                 return -EFAULT;
12559
12560         count -= ret;
12561         *ppos = pos + count;
12562         rval = count;
12563
12564         return rval;
12565 }
12566
12567
12568 static ssize_t
12569 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
12570 {
12571         loff_t pos = *ppos;
12572         size_t ret;
12573         uint32 buf;
12574
12575         if (pos < 0)
12576                 return -EINVAL;
12577         if (pos >= g_dbgfs.size || !count)
12578                 return 0;
12579         if (count > g_dbgfs.size - pos)
12580                 count = g_dbgfs.size - pos;
12581
12582         ret = copy_from_user(&buf, ubuf, sizeof(uint32));
12583         if (ret == count)
12584                 return -EFAULT;
12585
12586         /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
12587         dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
12588
12589         return count;
12590 }
12591
12592
/* llseek for the debugfs "mem" node.  whence: 0 = SEEK_SET, 1 = SEEK_CUR,
 * 2 = end-relative.  NOTE(review): the whence==2 case computes size - off
 * rather than the conventional size + off; a positive off here seeks back
 * from the end — confirm against users of this node before changing.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0: /* SEEK_SET */
			pos = off;
			break;
		case 1: /* SEEK_CUR */
			pos = file->f_pos + off;
			break;
		case 2: /* end-relative (see NOTE above) */
			pos = g_dbgfs.size - off;
	}
	/* pos == size is allowed: the next read simply returns EOF */
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
12610
/* file_operations for the debugfs "mem" register-access node */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write  = dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek = dhd_debugfs_lseek
};
12617
12618 static void dhd_dbg_create(void)
12619 {
12620         if (g_dbgfs.debugfs_dir) {
12621                 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12622                         NULL, &dhd_dbg_state_ops);
12623         }
12624 }
12625
12626 void dhd_dbg_init(dhd_pub_t *dhdp)
12627 {
12628         g_dbgfs.dhdp = dhdp;
12629         g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12630
12631         g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12632         if (IS_ERR(g_dbgfs.debugfs_dir)) {
12633                 g_dbgfs.debugfs_dir = NULL;
12634                 return;
12635         }
12636
12637         dhd_dbg_create();
12638
12639         return;
12640 }
12641
12642 void dhd_dbg_remove(void)
12643 {
12644         debugfs_remove(g_dbgfs.debugfs_mem);
12645         debugfs_remove(g_dbgfs.debugfs_dir);
12646
12647         bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
12648 }
12649 #endif /* BCMDBGFS */
12650
12651 #ifdef WLMEDIA_HTSF
12652
/* Stamp an outgoing packet with the current TSF time for latency
 * measurement.  Only packets whose destination port (read at a fixed
 * offset, assumed UDP) falls in [tsport, tsport+20] are stamped.
 * NOTE(review): the byte offsets (40, 44, 82, 84, HTSF_HOSTOFFSET) assume
 * a fixed ethernet+IP+UDP layout used by the iperf-based test harness —
 * confirm against that harness before touching them.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/*  timestamp packet  */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*              memcpy(&proto, p1+26, 4);       */
		memcpy(&dport, p1+40, 2);
/*      proto = ((ntoh32(proto))>> 16) & 0xFF;  */
		dport = ntoh16(dport);
	}

	/* timestamp only if  icmp or udb iperf with port 5555 */
/*      if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic  = HTSFMAGIC;
		ts.prio   = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10    = get_cycles();
		ts.t10    = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
12696
12697 static void dhd_dump_htsfhisto(histo_t *his, char *s)
12698 {
12699         int pktcnt = 0, curval = 0, i;
12700         for (i = 0; i < (NUMBIN-2); i++) {
12701                 curval += 500;
12702                 printf("%d ",  his->bin[i]);
12703                 pktcnt += his->bin[i];
12704         }
12705         printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12706                 his->bin[NUMBIN-1], s);
12707 }
12708
/* Classify a latency sample into the histogram.  Layout of histo_t.bin:
 * bins 0..NUMBIN-3 are 500us-wide latency buckets, bin[NUMBIN-2] tracks
 * the maximum sample seen, and bin[NUMBIN-1] counts negative samples.
 * Samples beyond the last bucket boundary are lumped into the last regular
 * bucket (bin[NUMBIN-3]).
 */
static
void sorttobin(int value, histo_t *histo)
{
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN-1]++;
		return;
	}
	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
		histo->bin[NUMBIN-2] = value;

	for (i = 0; i < (NUMBIN-2); i++) {
		binval += 500; /* 500m s bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	/* overflow: fold into the last regular bucket */
	histo->bin[NUMBIN-3]++;
}
12730
12731 static
12732 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12733 {
12734         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12735         struct sk_buff *skb;
12736         char *p1;
12737         uint16 old_magic;
12738         int d1, d2, d3, end2end;
12739         htsfts_t *htsf_ts;
12740         uint32 htsf;
12741
12742         skb = PKTTONATIVE(dhdp->osh, pktbuf);
12743         p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12744
12745         if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
12746                 memcpy(&old_magic, p1+78, 2);
12747                 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12748         } else {
12749                 return;
12750         }
12751         if (htsf_ts->magic == HTSFMAGIC) {
12752                 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12753                 htsf_ts->cE0 = get_cycles();
12754         }
12755
12756         if (old_magic == 0xACAC) {
12757
12758                 tspktcnt++;
12759                 htsf = dhd_get_htsf(dhd, 0);
12760                 memcpy(skb->data+92, &htsf, sizeof(uint32));
12761
12762                 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12763
12764                 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12765                 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12766                 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12767                 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12768
12769                 sorttobin(d1, &vi_d1);
12770                 sorttobin(d2, &vi_d2);
12771                 sorttobin(d3, &vi_d3);
12772                 sorttobin(end2end, &vi_d4);
12773
12774                 if (end2end > 0 && end2end >  maxdelay) {
12775                         maxdelay = end2end;
12776                         maxdelaypktno = tspktcnt;
12777                         memcpy(&maxdelayts, &ts[tsidx], 16);
12778                 }
12779                 if (++tsidx >= TSMAX)
12780                         tsidx = 0;
12781         }
12782 }
12783
/* Extrapolate the current TSF value from the last TSF/cycle-counter sync
 * point stored in dhd->htsf, using the calibrated fixed-point
 * cycles-to-microseconds factor (coef / coefdec1 / coefdec2).  Returns 0
 * (with an error log) when no coefficient has been calibrated yet.
 * NOTE(review): the wrap branch computes cur + (0xFFFFFFFF - last), one
 * less than the true wrapped delta — negligible at this resolution, but
 * confirm if exact values matter.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32    factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	if (cur_cycle >  dhd->htsf.last_cycle) {
		delta = cur_cycle -  dhd->htsf.last_cycle;
	} else {
		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval  = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
12814
12815 static void dhd_dump_latency(void)
12816 {
12817         int i, max = 0;
12818         int d1, d2, d3, d4, d5;
12819
12820         printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
12821         for (i = 0; i < TSMAX; i++) {
12822                 d1 = ts[i].t2 - ts[i].t1;
12823                 d2 = ts[i].t3 - ts[i].t2;
12824                 d3 = ts[i].t4 - ts[i].t3;
12825                 d4 = ts[i].t4 - ts[i].t1;
12826                 d5 = ts[max].t4-ts[max].t1;
12827                 if (d4 > d5 && d4 > 0)  {
12828                         max = i;
12829                 }
12830                 printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
12831                         ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
12832                         d1, d2, d3, d4, i);
12833         }
12834
12835         printf("current idx = %d \n", tsidx);
12836
12837         printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
12838         printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
12839         maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
12840         maxdelayts.t2 - maxdelayts.t1,
12841         maxdelayts.t3 - maxdelayts.t2,
12842         maxdelayts.t4 - maxdelayts.t3,
12843         maxdelayts.t4 - maxdelayts.t1);
12844 }
12845
12846
12847 static int
12848 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
12849 {
12850         wl_ioctl_t ioc;
12851         char buf[32];
12852         int ret;
12853         uint32 s1, s2;
12854
12855         struct tsf {
12856                 uint32 low;
12857                 uint32 high;
12858         } tsf_buf;
12859
12860         memset(&ioc, 0, sizeof(ioc));
12861         memset(&tsf_buf, 0, sizeof(tsf_buf));
12862
12863         ioc.cmd = WLC_GET_VAR;
12864         ioc.buf = buf;
12865         ioc.len = (uint)sizeof(buf);
12866         ioc.set = FALSE;
12867
12868         strncpy(buf, "tsf", sizeof(buf) - 1);
12869         buf[sizeof(buf) - 1] = '\0';
12870         s1 = dhd_get_htsf(dhd, 0);
12871         if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
12872                 if (ret == -EIO) {
12873                         DHD_ERROR(("%s: tsf is not supported by device\n",
12874                                 dhd_ifname(&dhd->pub, ifidx)));
12875                         return -EOPNOTSUPP;
12876                 }
12877                 return ret;
12878         }
12879         s2 = dhd_get_htsf(dhd, 0);
12880
12881         memcpy(&tsf_buf, buf, sizeof(tsf_buf));
12882         printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
12883                 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
12884                 dhd->htsf.coefdec2, s2-tsf_buf.low);
12885         printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
12886         return 0;
12887 }
12888
12889 void htsf_update(dhd_info_t *dhd, void *data)
12890 {
12891         static ulong  cur_cycle = 0, prev_cycle = 0;
12892         uint32 htsf, tsf_delta = 0;
12893         uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
12894         ulong b, a;
12895         cycles_t t;
12896
12897         /* cycles_t in inlcude/mips/timex.h */
12898
12899         t = get_cycles();
12900
12901         prev_cycle = cur_cycle;
12902         cur_cycle = t;
12903
12904         if (cur_cycle > prev_cycle)
12905                 cyc_delta = cur_cycle - prev_cycle;
12906         else {
12907                 b = cur_cycle;
12908                 a = prev_cycle;
12909                 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
12910         }
12911
12912         if (data == NULL)
12913                 printf(" tsf update ata point er is null \n");
12914
12915         memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
12916         memcpy(&cur_tsf, data, sizeof(tsf_t));
12917
12918         if (cur_tsf.low == 0) {
12919                 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
12920                 return;
12921         }
12922
12923         if (cur_tsf.low > prev_tsf.low)
12924                 tsf_delta = (cur_tsf.low - prev_tsf.low);
12925         else {
12926                 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
12927                  cur_tsf.low, prev_tsf.low));
12928                 if (cur_tsf.high > prev_tsf.high) {
12929                         tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
12930                         DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
12931                 } else {
12932                         return; /* do not update */
12933                 }
12934         }
12935
12936         if (tsf_delta)  {
12937                 hfactor = cyc_delta / tsf_delta;
12938                 tmp  =  (cyc_delta - (hfactor * tsf_delta))*10;
12939                 dec1 =  tmp/tsf_delta;
12940                 dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
12941                 tmp  =  (tmp   - (dec1*tsf_delta))*10;
12942                 dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
12943
12944                 if (dec3 > 4) {
12945                         if (dec2 == 9) {
12946                                 dec2 = 0;
12947                                 if (dec1 == 9) {
12948                                         dec1 = 0;
12949                                         hfactor++;
12950                                 } else {
12951                                         dec1++;
12952                                 }
12953                         } else {
12954                                 dec2++;
12955                         }
12956                 }
12957         }
12958
12959         if (hfactor) {
12960                 htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
12961                 dhd->htsf.coef = hfactor;
12962                 dhd->htsf.last_cycle = cur_cycle;
12963                 dhd->htsf.last_tsf = cur_tsf.low;
12964                 dhd->htsf.coefdec1 = dec1;
12965                 dhd->htsf.coefdec2 = dec2;
12966         } else {
12967                 htsf = prev_tsf.low;
12968         }
12969 }
12970
12971 #endif /* WLMEDIA_HTSF */
12972
12973 #ifdef CUSTOM_SET_CPUCORE
12974 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
12975 {
12976         int e_dpc = 0, e_rxf = 0, retry_set = 0;
12977
12978         if (!(dhd->chan_isvht80)) {
12979                 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
12980                 return;
12981         }
12982
12983         if (DPC_CPUCORE) {
12984                 do {
12985                         if (set == TRUE) {
12986                                 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12987                                         cpumask_of(DPC_CPUCORE));
12988                         } else {
12989                                 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12990                                         cpumask_of(PRIMARY_CPUCORE));
12991                         }
12992                         if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
12993                                 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
12994                                 return;
12995                         }
12996                         if (e_dpc < 0)
12997                                 OSL_SLEEP(1);
12998                 } while (e_dpc < 0);
12999         }
13000         if (RXF_CPUCORE) {
13001                 do {
13002                         if (set == TRUE) {
13003                                 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
13004                                         cpumask_of(RXF_CPUCORE));
13005                         } else {
13006                                 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
13007                                         cpumask_of(PRIMARY_CPUCORE));
13008                         }
13009                         if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
13010                                 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
13011                                 return;
13012                         }
13013                         if (e_rxf < 0)
13014                                 OSL_SLEEP(1);
13015                 } while (e_rxf < 0);
13016         }
13017 #ifdef DHD_OF_SUPPORT
13018         interrupt_set_cpucore(set);
13019 #endif /* DHD_OF_SUPPORT */
13020         DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
13021
13022         return;
13023 }
13024 #endif /* CUSTOM_SET_CPUCORE */
13025
13026 /* Get interface specific ap_isolate configuration */
13027 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
13028 {
13029         dhd_info_t *dhd = dhdp->info;
13030         dhd_if_t *ifp;
13031
13032         ASSERT(idx < DHD_MAX_IFS);
13033
13034         ifp = dhd->iflist[idx];
13035
13036         return ifp->ap_isolate;
13037 }
13038
13039 /* Set interface specific ap_isolate configuration */
13040 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13041 {
13042         dhd_info_t *dhd = dhdp->info;
13043         dhd_if_t *ifp;
13044
13045         ASSERT(idx < DHD_MAX_IFS);
13046
13047         ifp = dhd->iflist[idx];
13048
13049         ifp->ap_isolate = val;
13050
13051         return 0;
13052 }
13053
13054 #ifdef DHD_FW_COREDUMP
13055
13056
/* Path of the dot-file whose ASCII-digit content selects the firmware
 * memory-dump policy (read by dhd_get_memdump_info); the location
 * differs per customer platform build.
 */
#ifdef CUSTOMER_HW4_DEBUG
#ifdef PLATFORM_SLP
#define MEMDUMPINFO "/opt/etc/.memdump.info"
#else
#define MEMDUMPINFO "/data/.memdump.info"
#endif /* PLATFORM_SLP */
#elif defined(CUSTOMER_HW2)
#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
#else
#define MEMDUMPINFO "/installmedia/.memdump.info"
#endif /* CUSTOMER_HW4_DEBUG */
13068
13069 void dhd_get_memdump_info(dhd_pub_t *dhd)
13070 {
13071         struct file *fp = NULL;
13072         uint32 mem_val = DUMP_MEMFILE_MAX;
13073         int ret = 0;
13074         char *filepath = MEMDUMPINFO;
13075
13076         /* Read memdump info from the file */
13077         fp = filp_open(filepath, O_RDONLY, 0);
13078         if (IS_ERR(fp)) {
13079                 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13080                 goto done;
13081         } else {
13082                 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13083                 if (ret < 0) {
13084                         DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13085                         filp_close(fp, NULL);
13086                         goto done;
13087                 }
13088
13089                 mem_val = bcm_atoi((char *)&mem_val);
13090
13091                 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
13092                 filp_close(fp, NULL);
13093         }
13094
13095 done:
13096 #ifdef CUSTOMER_HW4_DEBUG
13097         dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
13098 #else
13099         dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
13100 #endif /* CUSTOMER_HW4_DEBUG */
13101 }
13102
13103
13104 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
13105 {
13106         dhd_dump_t *dump = NULL;
13107         dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
13108         if (dump == NULL) {
13109                 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
13110                 return;
13111         }
13112         dump->buf = buf;
13113         dump->bufsize = size;
13114
13115 #if defined(CONFIG_ARM64)
13116         DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
13117                 (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
13118 #elif defined(__ARM_ARCH_7A__)
13119         DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
13120                 (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
13121 #endif /* __ARM_ARCH_7A__ */
13122         if (dhdp->memdump_enabled == DUMP_MEMONLY) {
13123                 BUG_ON(1);
13124         }
13125
13126 #ifdef DHD_LOG_DUMP
13127         if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
13128                 dhd_schedule_log_dump(dhdp);
13129         }
13130 #endif /* DHD_LOG_DUMP */
13131         dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
13132                 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
13133 }
13134 static void
13135 dhd_mem_dump(void *handle, void *event_info, u8 event)
13136 {
13137         dhd_info_t *dhd = handle;
13138         dhd_dump_t *dump = event_info;
13139
13140         if (!dhd) {
13141                 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13142                 return;
13143         }
13144
13145         if (!dump) {
13146                 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
13147                 return;
13148         }
13149
13150         if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
13151                 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
13152         }
13153
13154         if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
13155 #ifdef DHD_LOG_DUMP
13156                 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
13157 #endif
13158                 TRUE) {
13159                 BUG_ON(1);
13160         }
13161         MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
13162 }
13163 #endif /* DHD_FW_COREDUMP */
13164
13165 #ifdef DHD_LOG_DUMP
13166 static void
13167 dhd_log_dump(void *handle, void *event_info, u8 event)
13168 {
13169         dhd_info_t *dhd = handle;
13170
13171         if (!dhd) {
13172                 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13173                 return;
13174         }
13175
13176         if (do_dhd_log_dump(&dhd->pub)) {
13177                 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13178                 return;
13179         }
13180 }
13181
13182 void dhd_schedule_log_dump(dhd_pub_t *dhdp)
13183 {
13184         dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
13185                 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
13186                 dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
13187 }
13188
/*
 * Synchronously write the DHD debug log ring (dhdp->dld_buf) to a
 * timestamped file under DHD_COMMON_DUMP_PATH, preceded by a header
 * with the DHD and firmware version strings, then reset the ring.
 * Returns 0 on success, -1 on failure.
 */
static int
do_dhd_log_dump(dhd_pub_t *dhdp)
{
        int ret = 0;
        struct file *fp = NULL;
        mm_segment_t old_fs;
        loff_t pos = 0;
        char dump_path[128];
        char common_info[1024];
        struct timeval curtime;
        uint32 file_mode;
        unsigned long flags = 0;

        if (!dhdp) {
                return -1;
        }

        /* Building the additional information like DHD, F/W version */
        memset(common_info, 0, sizeof(common_info));
        snprintf(common_info, sizeof(common_info),
                "---------- Common information ----------\n"
                "DHD version: %s\n"
                "F/W version: %s\n"
                "----------------------------------------\n",
                dhd_version, fw_version);

        /* change to KERNEL_DS address limit */
        old_fs = get_fs();
        set_fs(KERNEL_DS);

        /* Init file name */
        memset(dump_path, 0, sizeof(dump_path));
        do_gettimeofday(&curtime);
        snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
                DHD_COMMON_DUMP_PATH "debug_dump",
                (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
        file_mode = O_CREAT | O_WRONLY | O_SYNC;

        DHD_ERROR(("debug_dump_path = %s\n", dump_path));
        fp = filp_open(dump_path, file_mode, 0644);
        if (IS_ERR(fp)) {
                ret = PTR_ERR(fp);
                DHD_ERROR(("open file error, err = %d\n", ret));
                ret = -1;
                goto exit;
        }

        /* header first; then the whole ring if it wrapped, otherwise only
         * the valid region between front and present (write results are
         * deliberately not checked here)
         */
        fp->f_op->write(fp, common_info, strlen(common_info), &pos);
        if (dhdp->dld_buf.wraparound) {
                fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
        } else {
                fp->f_op->write(fp, dhdp->dld_buf.buffer,
                        (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
        }

        /* re-init dhd_log_dump_buf structure */
        spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
        dhdp->dld_buf.wraparound = 0;
        dhdp->dld_buf.present = dhdp->dld_buf.front;
        dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
        bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
        spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
exit:
        /* ret == 0 implies the open above succeeded, so fp is valid */
        if (!ret) {
                filp_close(fp, NULL);
        }
        set_fs(old_fs);

        return ret;
}
13259 #endif /* DHD_LOG_DUMP */
13260
13261 #ifdef BCMASSERT_LOG
/* Path of the dot-file whose ASCII-digit content selects the assert
 * policy (read by dhd_get_assert_info); location differs per customer
 * platform build.
 */
#ifdef CUSTOMER_HW4_DEBUG
#ifdef PLATFORM_SLP
#define ASSERTINFO "/opt/etc/.assert.info"
#else
#define ASSERTINFO "/data/.assert.info"
#endif /* PLATFORM_SLP */
#elif defined(CUSTOMER_HW2)
#define ASSERTINFO "/data/misc/wifi/.assert.info"
#else
#define ASSERTINFO "/installmedia/.assert.info"
#endif /* CUSTOMER_HW4_DEBUG */
13273 void dhd_get_assert_info(dhd_pub_t *dhd)
13274 {
13275         struct file *fp = NULL;
13276         char *filepath = ASSERTINFO;
13277
13278         /*
13279          * Read assert info from the file
13280          * 0: Trigger Kernel crash by panic()
13281          * 1: Print out the logs and don't trigger Kernel panic. (default)
13282          * 2: Trigger Kernel crash by BUG()
13283          * File doesn't exist: Keep default value (1).
13284          */
13285         fp = filp_open(filepath, O_RDONLY, 0);
13286         if (IS_ERR(fp)) {
13287                 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13288         } else {
13289                 int mem_val = 0;
13290                 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13291                 if (ret < 0) {
13292                         DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13293                 } else {
13294                         mem_val = bcm_atoi((char *)&mem_val);
13295                         DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13296                         g_assert_type = mem_val;
13297                 }
13298                 filp_close(fp, NULL);
13299         }
13300 }
13301 #endif /* BCMASSERT_LOG */
13302
13303
13304 #ifdef DHD_WMF
13305 /* Returns interface specific WMF configuration */
13306 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13307 {
13308         dhd_info_t *dhd = dhdp->info;
13309         dhd_if_t *ifp;
13310
13311         ASSERT(idx < DHD_MAX_IFS);
13312
13313         ifp = dhd->iflist[idx];
13314         return &ifp->wmf;
13315 }
13316 #endif /* DHD_WMF */
13317
13318
13319 #if defined(DHD_L2_FILTER)
13320 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13321 {
13322         return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13323 }
13324 #endif 
13325
13326 #ifdef DHD_L2_FILTER
13327 arp_table_t*
13328 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13329 {
13330         dhd_info_t *dhd = dhdp->info;
13331         dhd_if_t *ifp;
13332
13333         ASSERT(bssidx < DHD_MAX_IFS);
13334
13335         ifp = dhd->iflist[bssidx];
13336         return ifp->phnd_arp_table;
13337 }
13338
13339 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13340 {
13341         dhd_info_t *dhd = dhdp->info;
13342         dhd_if_t *ifp;
13343
13344         ASSERT(idx < DHD_MAX_IFS);
13345
13346         ifp = dhd->iflist[idx];
13347
13348         if (ifp)
13349                 return ifp->parp_enable;
13350         else
13351                 return FALSE;
13352 }
13353
13354 /* Set interface specific proxy arp configuration */
13355 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13356 {
13357         dhd_info_t *dhd = dhdp->info;
13358         dhd_if_t *ifp;
13359         ASSERT(idx < DHD_MAX_IFS);
13360         ifp = dhd->iflist[idx];
13361
13362         if (!ifp)
13363             return BCME_ERROR;
13364
13365         /* At present all 3 variables are being
13366          * handled at once
13367          */
13368         ifp->parp_enable = val;
13369         ifp->parp_discard = val;
13370         ifp->parp_allnode = !val;
13371
13372         /* Flush ARP entries when disabled */
13373         if (val == FALSE) {
13374                 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13375                         FALSE, dhdp->tickcnt);
13376         }
13377         return BCME_OK;
13378 }
13379
13380 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13381 {
13382         dhd_info_t *dhd = dhdp->info;
13383         dhd_if_t *ifp;
13384
13385         ASSERT(idx < DHD_MAX_IFS);
13386
13387         ifp = dhd->iflist[idx];
13388
13389         ASSERT(ifp);
13390         return ifp->parp_discard;
13391 }
13392
13393 bool
13394 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13395 {
13396         dhd_info_t *dhd = dhdp->info;
13397         dhd_if_t *ifp;
13398
13399         ASSERT(idx < DHD_MAX_IFS);
13400
13401         ifp = dhd->iflist[idx];
13402
13403         ASSERT(ifp);
13404
13405         return ifp->parp_allnode;
13406 }
13407
13408 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13409 {
13410         dhd_info_t *dhd = dhdp->info;
13411         dhd_if_t *ifp;
13412
13413         ASSERT(idx < DHD_MAX_IFS);
13414
13415         ifp = dhd->iflist[idx];
13416
13417         ASSERT(ifp);
13418
13419         return ifp->dhcp_unicast;
13420 }
13421
13422 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13423 {
13424         dhd_info_t *dhd = dhdp->info;
13425         dhd_if_t *ifp;
13426         ASSERT(idx < DHD_MAX_IFS);
13427         ifp = dhd->iflist[idx];
13428
13429         ASSERT(ifp);
13430
13431         ifp->dhcp_unicast = val;
13432         return BCME_OK;
13433 }
13434
13435 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13436 {
13437         dhd_info_t *dhd = dhdp->info;
13438         dhd_if_t *ifp;
13439
13440         ASSERT(idx < DHD_MAX_IFS);
13441
13442         ifp = dhd->iflist[idx];
13443
13444         ASSERT(ifp);
13445
13446         return ifp->block_ping;
13447 }
13448
13449 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13450 {
13451         dhd_info_t *dhd = dhdp->info;
13452         dhd_if_t *ifp;
13453         ASSERT(idx < DHD_MAX_IFS);
13454         ifp = dhd->iflist[idx];
13455
13456         ASSERT(ifp);
13457
13458         ifp->block_ping = val;
13459
13460         return BCME_OK;
13461 }
13462
13463 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13464 {
13465         dhd_info_t *dhd = dhdp->info;
13466         dhd_if_t *ifp;
13467
13468         ASSERT(idx < DHD_MAX_IFS);
13469
13470         ifp = dhd->iflist[idx];
13471
13472         ASSERT(ifp);
13473
13474         return ifp->grat_arp;
13475 }
13476
13477 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13478 {
13479         dhd_info_t *dhd = dhdp->info;
13480         dhd_if_t *ifp;
13481         ASSERT(idx < DHD_MAX_IFS);
13482         ifp = dhd->iflist[idx];
13483
13484         ASSERT(ifp);
13485
13486         ifp->grat_arp = val;
13487
13488         return BCME_OK;
13489 }
13490 #endif /* DHD_L2_FILTER */
13491
13492
13493 #if defined(SET_RPS_CPUS)
13494 int dhd_rps_cpus_enable(struct net_device *net, int enable)
13495 {
13496         dhd_info_t *dhd = DHD_DEV_INFO(net);
13497         dhd_if_t *ifp;
13498         int ifidx;
13499         char * RPS_CPU_SETBUF;
13500
13501         ifidx = dhd_net2idx(dhd, net);
13502         if (ifidx == DHD_BAD_IF) {
13503                 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
13504                 return -ENODEV;
13505         }
13506
13507         if (ifidx == PRIMARY_INF) {
13508                 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
13509                         DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
13510                         RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
13511                 } else {
13512                         DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
13513                         RPS_CPU_SETBUF = RPS_CPUS_MASK;
13514                 }
13515         } else if (ifidx == VIRTUAL_INF) {
13516                 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
13517                 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
13518         } else {
13519                 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
13520                 return -EINVAL;
13521         }
13522
13523         ifp = dhd->iflist[ifidx];
13524         if (ifp) {
13525                 if (enable) {
13526                         DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
13527                         custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
13528                 } else {
13529                         custom_rps_map_clear(ifp->net->_rx);
13530                 }
13531         } else {
13532                 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
13533                 return -ENODEV;
13534         }
13535         return BCME_OK;
13536 }
13537
/*
 * Parse a CPU-mask string (@buf, @len) and install it as the RPS map of
 * @queue, mirroring the logic of the net-core sysfs store_rps_map().
 * Returns the number of CPUs in the new map, a negative errno, or -1
 * when the parsed mask is empty.
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        DHD_INFO(("%s : Entered.\n", __FUNCTION__));

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
                return -ENOMEM;
        }

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
                return err;
        }

        /* allocate at least a cache line so the flexible cpus[] array fits */
        map = kzalloc(max_t(unsigned int,
                RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
                return -ENOMEM;
        }

        /* flatten the cpumask into the map's dense cpu array */
        i = 0;
        for_each_cpu(cpu, mask) {
                map->cpus[i++] = cpu;
        }

        if (i) {
                map->len = i;
        } else {
                /* empty mask: nothing to install */
                kfree(map);
                map = NULL;
                free_cpumask_var(mask);
                DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
                return -1;
        }

        /* publish the new map: readers use RCU, writers serialize on the lock */
        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map) {
                static_key_slow_inc(&rps_needed);
        }
        if (old_map) {
                /* defer the free until all RCU readers are done with old_map */
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);

        DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
        return map->len;
}
13601
13602 void custom_rps_map_clear(struct netdev_rx_queue *queue)
13603 {
13604         struct rps_map *map;
13605
13606         DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13607
13608         map = rcu_dereference_protected(queue->rps_map, 1);
13609         if (map) {
13610                 RCU_INIT_POINTER(queue->rps_map, NULL);
13611                 kfree_rcu(map, rcu);
13612                 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
13613         }
13614 }
13615 #endif 
13616
13617
13618
13619 #ifdef DHD_DEBUG_PAGEALLOC
13620
/*
 * Callback invoked by the DEBUG_PAGEALLOC machinery when host page
 * corruption is detected at @addr_corrupt (@len bytes).  Hexdumps the
 * corrupted region, flushes DHD state to the kernel log, and on PCIe
 * builds with coredump support pulls the dongle memory dump under the
 * DUMP_MEMONLY policy (which ends in BUG_ON in dhd_schedule_memdump).
 */
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
        dhd_pub_t *dhdp = (dhd_pub_t *)handle;

        DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
                __FUNCTION__, addr_corrupt, (uint32)len));

        DHD_OS_WAKE_LOCK(dhdp);
        prhex("Page Corruption:", addr_corrupt, len);
        dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
        /* Load the dongle side dump to host memory and then BUG_ON() */
        dhdp->memdump_enabled = DUMP_MEMONLY;
        dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
        dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
        DHD_OS_WAKE_UNLOCK(dhdp);
}
13640 EXPORT_SYMBOL(dhd_page_corrupt_cb);
13641 #endif /* DHD_DEBUG_PAGEALLOC */
13642
13643 #ifdef DHD_PKTID_AUDIT_ENABLED
/*
 * Callback invoked when a packet-ID audit failure is detected.  Flushes
 * DHD state to the kernel log and, on PCIe builds with coredump
 * support, pulls the dongle memory dump under the DUMP_MEMFILE_BUGON
 * policy (dump to file, then BUG_ON in dhd_mem_dump).
 */
void
dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
{
        DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
        DHD_OS_WAKE_LOCK(dhdp);
        dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
        /* Load the dongle side dump to host memory and then BUG_ON() */
        dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
        dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
        dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
        DHD_OS_WAKE_UNLOCK(dhdp);
}
13658 #endif /* DHD_PKTID_AUDIT_ENABLED */
13659
13660 /* ----------------------------------------------------------------------------
13661  * Infrastructure code for sysfs interface support for DHD
13662  *
13663  * What is sysfs interface?
13664  * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13665  *
13666  * Why sysfs interface?
13667  * This is the Linux standard way of changing/configuring Run Time parameters
13668  * for a driver. We can use this interface to control "linux" specific driver
13669  * parameters.
13670  *
13671  * -----------------------------------------------------------------------------
13672  */
13673
13674 #include <linux/sysfs.h>
13675 #include <linux/kobject.h>
13676
13677 #if defined(DHD_TRACE_WAKE_LOCK)
13678
13679 /* Function to show the history buffer */
13680 static ssize_t
13681 show_wklock_trace(struct dhd_info *dev, char *buf)
13682 {
13683         ssize_t ret = 0;
13684         dhd_info_t *dhd = (dhd_info_t *)dev;
13685
13686         buf[ret] = '\n';
13687         buf[ret+1] = 0;
13688
13689         dhd_wk_lock_stats_dump(&dhd->pub);
13690         return ret+1;
13691 }
13692
13693 /* Function to enable/disable wakelock trace */
13694 static ssize_t
13695 wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13696 {
13697         unsigned long onoff;
13698         unsigned long flags;
13699         dhd_info_t *dhd = (dhd_info_t *)dev;
13700
13701         onoff = bcm_strtoul(buf, NULL, 10);
13702         if (onoff != 0 && onoff != 1) {
13703                 return -EINVAL;
13704         }
13705
13706         spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13707         trace_wklock_onoff = onoff;
13708         spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13709         if (trace_wklock_onoff) {
13710                 printk("ENABLE WAKLOCK TRACE\n");
13711         } else {
13712                 printk("DISABLE WAKELOCK TRACE\n");
13713         }
13714
13715         return (ssize_t)(onoff+1);
13716 }
13717 #endif /* DHD_TRACE_WAKE_LOCK */
13718
13719 /*
13720  * Generic Attribute Structure for DHD.
13721  * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13722  * to instantiate an object of type dhd_attr,  populate it with
13723  * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13724  * and add the object to default_attrs[] array, that gets registered
13725  * to the kobject of dhd (named bcm-dhd).
13726  */
13727
struct dhd_attr {
	struct attribute attr;	/* embedded sysfs attribute (name/mode) */
	ssize_t(*show)(struct dhd_info *, char *);	/* sysfs read handler */
	ssize_t(*store)(struct dhd_info *, const char *, size_t count);	/* sysfs write handler */
};
13733
#if defined(DHD_TRACE_WAKE_LOCK)
/* /sys/bcm-dhd/wklock_trace: mode 0660 (rw for owner and group) */
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
#endif /* defined(DHD_TRACE_WAKE_LOCK) */
13738
/* Attribute object that gets registered with "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif
	NULL	/* sysfs requires a NULL-terminated attribute list */
};
13746
/* Map an embedded kobject / attribute back to its containing DHD object */
#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
#define to_attr(a) container_of(a, struct dhd_attr, attr)
13749
13750 /*
13751  * bcm-dhd kobject show function, the "attr" attribute specifices to which
13752  * node under "bcm-dhd" the show function is called.
13753  */
13754 static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13755 {
13756         dhd_info_t *dhd = to_dhd(kobj);
13757         struct dhd_attr *d_attr = to_attr(attr);
13758         int ret;
13759
13760         if (d_attr->show)
13761                 ret = d_attr->show(dhd, buf);
13762         else
13763                 ret = -EIO;
13764
13765         return ret;
13766 }
13767
13768
13769 /*
13770  * bcm-dhd kobject show function, the "attr" attribute specifices to which
13771  * node under "bcm-dhd" the store function is called.
13772  */
13773 static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13774         const char *buf, size_t count)
13775 {
13776         dhd_info_t *dhd = to_dhd(kobj);
13777         struct dhd_attr *d_attr = to_attr(attr);
13778         int ret;
13779
13780         if (d_attr->store)
13781                 ret = d_attr->store(dhd, buf, count);
13782         else
13783                 ret = -EIO;
13784
13785         return ret;
13786
13787 }
13788
/* Dispatch table routing generic kobject show/store into dhd_attr handlers */
static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};
13793
/* kobject type for the "bcm-dhd" sysfs node: ops plus default attributes */
static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};
13798
13799 /* Create a kobject and attach to sysfs interface */
13800 static int dhd_sysfs_init(dhd_info_t *dhd)
13801 {
13802         int ret = -1;
13803
13804         if (dhd == NULL) {
13805                 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13806                 return ret;
13807         }
13808
13809         /* Initialize the kobject */
13810         ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13811         if (ret) {
13812                 kobject_put(&dhd->dhd_kobj);
13813                 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13814                 return ret;
13815         }
13816
13817         /*
13818          * We are always responsible for sending the uevent that the kobject
13819          * was added to the system.
13820          */
13821         kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13822
13823         return ret;
13824 }
13825
13826 /* Done with the kobject and detach the sysfs interface */
13827 static void dhd_sysfs_exit(dhd_info_t *dhd)
13828 {
13829         if (dhd == NULL) {
13830                 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13831                 return;
13832         }
13833
13834         /* Releae the kobject */
13835         kobject_put(&dhd->dhd_kobj);
13836 }
13837
13838 #ifdef DHD_LOG_DUMP
/*
 * Allocate and initialize the debug log-dump ring buffer.
 * Prefers a preallocated static buffer when static-buf support is built
 * in; otherwise uses kmalloc(). On first-allocation failure it retries
 * with kmalloc(); if that also fails it returns without setting
 * dld_enable, so dhd_log_dump_print() drops all messages.
 */
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	spin_lock_init(&dhd->dld_buf.lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
		DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	if (!dhd->dld_buf.buffer) {
		/* fall back to (or retry) a plain kmalloc allocation */
		dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
		DHD_ERROR(("Try to allocate memory using kmalloc().\n"));

		if (!dhd->dld_buf.buffer) {
			DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
			return;
		}
	}

	/* reset ring-buffer bookkeeping and enable logging */
	dhd->dld_buf.wraparound = 0;
	dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_buf.present = dhd->dld_buf.buffer;
	dhd->dld_buf.front = dhd->dld_buf.buffer;
	dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_enable = 1;
}
13867
13868 void
13869 dhd_log_dump_deinit(dhd_pub_t *dhd)
13870 {
13871         dhd->dld_enable = 0;
13872 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13873         DHD_OS_PREFREE(dhd,
13874                 dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13875 #else
13876         kfree(dhd->dld_buf.buffer);
13877 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13878 }
13879
13880 void
13881 dhd_log_dump_print(const char *fmt, ...)
13882 {
13883         int len = 0;
13884         char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
13885         va_list args;
13886         dhd_pub_t *dhd = NULL;
13887         unsigned long flags = 0;
13888
13889         if (wl_get_bcm_cfg80211_ptr()) {
13890                 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
13891         }
13892
13893         if (!dhd || dhd->dld_enable != 1) {
13894                 return;
13895         }
13896
13897         va_start(args, fmt);
13898
13899         len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
13900         if (len < 0) {
13901                 return;
13902         }
13903
13904         /* make a critical section to eliminate race conditions */
13905         spin_lock_irqsave(&dhd->dld_buf.lock, flags);
13906         if (dhd->dld_buf.remain < len) {
13907                 dhd->dld_buf.wraparound = 1;
13908                 dhd->dld_buf.present = dhd->dld_buf.front;
13909                 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13910         }
13911
13912         strncpy(dhd->dld_buf.present, tmp_buf, len);
13913         dhd->dld_buf.remain -= len;
13914         dhd->dld_buf.present += len;
13915         spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
13916
13917         /* double check invalid memory operation */
13918         ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
13919         va_end(args);
13920 }
13921
/*
 * Format the current scheduler-clock time as "seconds.microseconds" for
 * log-dump entries. Returns a pointer to a static buffer, so the result
 * is overwritten by the next call and is not safe for concurrent callers.
 */
char*
dhd_log_dump_get_timestamp(void)
{
	static char buf[16];
	u64 ts_nsec;
	unsigned long rem_nsec;

	ts_nsec = local_clock();
	/* do_div() divides ts_nsec in place (leaving seconds) and returns
	 * the remainder in nanoseconds */
	rem_nsec = do_div(ts_nsec, 1000000000);
	snprintf(buf, sizeof(buf), "%5lu.%06lu",
		(unsigned long)ts_nsec, rem_nsec / 1000);

	return buf;
}
13936
13937 #endif /* DHD_LOG_DUMP */
13938
13939 /* ---------------------------- End of sysfs implementation ------------------------------------- */
13940
13941 void *dhd_get_pub(struct net_device *dev)
13942 {
13943         dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
13944         if (dhdinfo)
13945                 return (void *)&dhdinfo->pub;
13946         else
13947                 return NULL;
13948 }
13949
13950 bool dhd_os_wd_timer_enabled(void *bus)
13951 {
13952         dhd_pub_t *pub = bus;
13953         dhd_info_t *dhd = (dhd_info_t *)pub->info;
13954
13955         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13956         if (!dhd) {
13957                 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13958                 return FALSE;
13959         }
13960         return dhd->wd_timer_valid;
13961 }