/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2016, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */


#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */

#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
#include <proto/802.3.h>

#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
#include <dhd_dbg.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */

#ifdef DHD_L2_FILTER
#include <proto/bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */


#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */

#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */


#if defined(DHD_LB)
/* Dynamic CPU selection for load balancing */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#if !defined(DHD_LB_PRIMARY_CPUS)
#define DHD_LB_PRIMARY_CPUS     0x0 /* Big CPU coreids mask */
#endif

#if !defined(DHD_LB_SECONDARY_CPUS)
#define DHD_LB_SECONDARY_CPUS   0xFE /* Little CPU coreids mask */
#endif

#define HIST_BIN_SIZE   8

#if defined(DHD_LB_RXP)
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXP */

#endif /* DHD_LB */

#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>

#define HTSF_MINLEN 200    /* min. packet length to timestamp */
#define HTSF_BUS_DELAY 150 /* assumed fixed propagation delay in us */
#define TSMAX  1000        /* max no. of timing record kept   */
#define NUMBIN 34

static uint32 tsidx = 0;
static uint32 htsf_seqnum = 0;
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;

typedef struct histo_ {
        uint32 bin[NUMBIN];
} histo_t;

#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif

static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */

#ifdef STBLINUX
#ifdef quote_str
#undef quote_str
#endif /* quote_str */
#ifdef to_str
#undef to_str
#endif /* to_str */
#define to_str(s) #s
#define quote_str(s) to_str(s)

static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
#endif /* STBLINUX */


#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif
extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL   0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH          1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH   DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef BCM_FD_AGGR
#include <bcm_rpc.h>
#include <bcm_rpc_tp.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif

#include <wl_android.h>

/* Maximum STA per radio */
#define DHD_MAX_STA     32



const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
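
/*
 * Worked example of the mapping above (the numbers follow directly from the
 * two tables; this adds no driver logic): WME_PRIO2AC(7) resolves as
 * wme_fifo2ac[prio2fifo[7]] == wme_fifo2ac[3] == 3, while WME_PRIO2AC(0)
 * resolves as wme_fifo2ac[prio2fifo[0]] == wme_fifo2ac[1] == 1.
 */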

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
        unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
        .notifier_call = dhd_inetaddr_notifier_call
};
/* Ensure this notifier is registered only once; registering it twice would
 * corrupt the kernel notifier linked list (the entry's 'next' would point to
 * itself).
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
        unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
        .notifier_call = dhd_inet6addr_notifier_call
};
/* Ensure this notifier is registered only once; registering it twice would
 * corrupt the kernel notifier linked list (the entry's 'next' would point to
 * itself).
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */

#include <dhd_bus.h>

#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net)    (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)    (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
        return "";
}
#endif  /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t  g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend          register_pre_suspend
#define unregister_early_suspend        unregister_pre_suspend
#define early_suspend                           pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN                50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);

#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
#endif


#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif





#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
void dhd_schedule_log_dump(dhd_pub_t *dhdp);
static int do_dhd_log_dump(dhd_pub_t *dhdp);
#endif /* DHD_LOG_DUMP */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
        .notifier_call = dhd_reboot_callback,
        .priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

typedef struct dhd_if_event {
        struct list_head        list;
        wl_event_data_if_t      event;
        char                    name[IFNAMSIZ+1];
        uint8                   mac[ETHER_ADDR_LEN];
} dhd_if_event_t;

/* Interface control information */
typedef struct dhd_if {
        struct dhd_info *info;                  /* back pointer to dhd_info */
        /* OS/stack specifics */
        struct net_device *net;
        int                             idx;                    /* iface idx in dongle */
        uint                    subunit;                /* subunit */
        uint8                   mac_addr[ETHER_ADDR_LEN];       /* assigned MAC address */
        bool                    set_macaddress;
        bool                    set_multicast;
        uint8                   bssidx;                 /* bsscfg index for the interface */
        bool                    attached;               /* Delayed attachment when unset */
        bool                    txflowcontrol;  /* Per interface flow control indicator */
        char                    name[IFNAMSIZ+1]; /* linux interface name */
        char                    dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
        struct net_device_stats stats;
#ifdef DHD_WMF
        dhd_wmf_t               wmf;            /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
        struct list_head sta_list;              /* sll of associated stations */
#if !defined(BCM_GMAC3)
        spinlock_t      sta_list_lock;          /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
        uint32  ap_isolate;                     /* ap-isolation settings */
#ifdef DHD_L2_FILTER
        bool parp_enable;
        bool parp_discard;
        bool parp_allnode;
        arp_table_t *phnd_arp_table;
/* for Per BSS modification */
        bool dhcp_unicast;
        bool block_ping;
        bool grat_arp;
#endif /* DHD_L2_FILTER */
} dhd_if_t;

#ifdef WLMEDIA_HTSF
typedef struct {
        uint32 low;
        uint32 high;
} tsf_t;

typedef struct {
        uint32 last_cycle;
        uint32 last_sec;
        uint32 last_tsf;
        uint32 coef;     /* scaling factor */
        uint32 coefdec1; /* first decimal  */
        uint32 coefdec2; /* second decimal */
} htsf_t;

typedef struct {
        uint32 t1;
        uint32 t2;
        uint32 t3;
        uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts;
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif  /* WLMEDIA_HTSF */

struct ipv6_work_info_t {
        uint8                   if_idx;
        char                    ipv6_addr[16];
        unsigned long           event;
};

#ifdef DHD_DEBUG
typedef struct dhd_dump {
        uint8 *buf;
        int bufsize;
} dhd_dump_t;
#endif /* DHD_DEBUG */

/* When perimeter locks are deployed, any blocking call must be preceded
 * by a PERIM UNLOCK and followed by a PERIM LOCK.
 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
 * wait_event_timeout(). See the illustrative sketch below.
 */
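
/*
 * Illustrative sketch only (disabled): the unlock/relock pattern described
 * above, assuming the DHD_PERIM_LOCK/DHD_PERIM_UNLOCK macros declared in
 * dhd.h. The helper name and the 2000 ms timeout are hypothetical.
 */
#if 0
static int
dhd_blocking_wait_example(dhd_pub_t *dhdp, wait_queue_head_t *wq, bool *cond)
{
        long timeout = msecs_to_jiffies(2000);

        DHD_PERIM_UNLOCK(dhdp);         /* release the perimeter before blocking */
        timeout = wait_event_timeout(*wq, *cond, timeout);
        DHD_PERIM_LOCK(dhdp);           /* re-acquire it before touching shared state */

        return timeout ? BCME_OK : BCME_ERROR;
}
#endif /* 0 */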

/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
        wl_iw_t         iw;             /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
        dhd_pub_t pub;
        dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */

        void *adapter;                  /* adapter information, interrupt, fw path etc. */
        char fw_path[PATH_MAX];         /* path to firmware image */
        char nv_path[PATH_MAX];         /* path to nvram vars file */
        char conf_path[PATH_MAX];       /* path to config vars file */

        /* serialize dhd iovars */
        struct mutex dhd_iovar_mutex;

        struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
        spinlock_t      wlfc_spinlock;

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
        htsf_t  htsf;
#endif
        wait_queue_head_t ioctl_resp_wait;
        wait_queue_head_t d3ack_wait;
        wait_queue_head_t dhd_bus_busy_state_wait;
        uint32  default_wd_interval;

        struct timer_list timer;
        bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
        struct timer_list rpm_timer;
        bool rpm_timer_valid;
        tsk_ctl_t         thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
        struct tasklet_struct tasklet;
        spinlock_t      sdlock;
        spinlock_t      txqlock;
        spinlock_t      dhd_lock;

        struct semaphore sdsem;
        tsk_ctl_t       thr_dpc_ctl;
        tsk_ctl_t       thr_wdt_ctl;

        tsk_ctl_t       thr_rxf_ctl;
        spinlock_t      rxf_lock;
        bool            rxthread_enabled;

        /* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
        struct wake_lock wl_wifi;   /* Wifi wakelock */
        struct wake_lock wl_rxwake; /* Wifi rx wakelock */
        struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
        struct wake_lock wl_wdwake; /* Wifi wd wakelock */
        struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
        struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
        struct wake_lock wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
        /* net_device interface lock, prevent race conditions among net_dev interface
         * calls and wifi_on or wifi_off
         */
        struct mutex dhd_net_if_mutex;
        struct mutex dhd_suspend_mutex;
#endif
        spinlock_t wakelock_spinlock;
        spinlock_t wakelock_evt_spinlock;
        uint32 wakelock_event_counter;
        uint32 wakelock_counter;
        int wakelock_wd_counter;
        int wakelock_rx_timeout_enable;
        int wakelock_ctrl_timeout_enable;
        bool waive_wakelock;
        uint32 wakelock_before_waive;

        /* Thread to issue ioctl for multicast */
        wait_queue_head_t ctrl_wait;
        atomic_t pend_8021x_cnt;
        dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
        dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
        struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
        u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
        void *rpc_th;
        void *rpc_osh;
        struct timer_list rpcth_timer;
        bool rpcth_timer_active;
        uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
        spinlock_t      tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
        bool cpufreq_fix_status;
        struct mutex cpufreq_fix;
        struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
        struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
        void                    *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
        struct notifier_block freq_trans;
        int __percpu *new_freq;
#endif
        unsigned int unit;
        struct notifier_block pm_notifier;
#ifdef DHD_PSTA
        uint32  psta_mode;      /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
        dhd_dump_t *dump;
        struct timer_list join_timer;
        u32 join_timeout_val;
        bool join_timer_active;
        uint scan_time_count;
        struct timer_list scan_timer;
        bool scan_timer_active;
#endif
#if defined(DHD_LB)
        /* CPU Load Balance dynamic CPU selection */

        /* Variable that tracks the current CPUs available for candidacy */
        cpumask_var_t cpumask_curr_avail;

        /* Primary and secondary CPU mask */
        cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
        cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

        struct notifier_block cpu_notifier;

        /* Tasklet to handle Tx Completion packet freeing */
        struct tasklet_struct tx_compl_tasklet;
        atomic_t        tx_compl_cpu;


        /* Tasklet to handle RxBuf Post during Rx completion */
        struct tasklet_struct rx_compl_tasklet;
        atomic_t        rx_compl_cpu;

        /* NAPI struct for handling rx packet sendup. Packets are removed from
         * the H2D RxCompl ring and placed into rx_pend_queue, which is then
         * appended to rx_napi_queue (under its lock) and rx_napi_struct is
         * scheduled to run on rx_napi_cpu (see the illustrative sketch after
         * this struct).
         */
        struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
        struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
        struct napi_struct    rx_napi_struct ____cacheline_aligned;
        atomic_t        rx_napi_cpu; /* cpu on which the napi is dispatched */
        struct net_device    *rx_napi_netdev; /* netdev of primary interface */

        struct work_struct    rx_napi_dispatcher_work;
        struct work_struct    tx_compl_dispatcher_work;
        struct work_struct    rx_compl_dispatcher_work;
        /* Number of times DPC Tasklet ran */
        uint32  dhd_dpc_cnt;

        /* Number of times NAPI processing got scheduled */
        uint32  napi_sched_cnt;

        /* Number of times NAPI processing ran on each available core */
        uint32  napi_percpu_run_cnt[NR_CPUS];

        /* Number of times RX Completions got scheduled */
        uint32  rxc_sched_cnt;
        /* Number of times RX Completion ran on each available core */
        uint32  rxc_percpu_run_cnt[NR_CPUS];

        /* Number of times TX Completions got scheduled */
        uint32  txc_sched_cnt;
        /* Number of times TX Completions ran on each available core */
        uint32  txc_percpu_run_cnt[NR_CPUS];

        /* CPU status */
        /* Number of times each CPU came online */
        uint32  cpu_online_cnt[NR_CPUS];

        /* Number of times each CPU went offline */
        uint32  cpu_offline_cnt[NR_CPUS];

        /*
         * Consumer Histogram - NAPI RX Packet processing
         * -----------------------------------------------
         * On each CPU, when the NAPI RX packet-processing callback is
         * invoked, the number of packets it processed is recorded in this
         * data structure. Capturing the exact count per run would be
         * costly, so the count is rounded up to the next power of 2 and
         * credited to one of 8 bins (2^1, 2^2 ... 2^8); the matching bin's
         * counter is incremented.
         * For example, assume NAPI Rx runs 3 times on CPU 1 with all bin
         * counters starting at 0:
         * iteration 1 - 10 packets (the 2^4 bin counter increments to 1)
         * iteration 2 - 30 packets (the 2^5 bin counter increments to 1)
         * iteration 3 - 15 packets (the 2^4 bin counter increments to 2)
         */
        uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
        uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
        uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

        struct kobject dhd_kobj;
#ifdef SUPPORT_SENSORHUB
        uint32 shub_enable;
#endif /* SUPPORT_SENSORHUB */

        struct delayed_work dhd_memdump_work;
} dhd_info_t;
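
/*
 * Illustrative sketch only (disabled): the rx_pend_queue -> rx_napi_queue
 * hand-off described in the struct comment above. The helper name is
 * hypothetical; the fields and the splice/schedule pattern are the point.
 */
#if 0
static void
dhd_rx_napi_handoff_sketch(dhd_info_t *dhd)
{
        unsigned long flags;

        /* Producer (DPC) side: move everything staged on rx_pend_queue onto
         * rx_napi_queue under the latter's lock.
         */
        spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
        skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
        spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

        /* rx_napi_dispatcher_work then runs on rx_napi_cpu and ultimately
         * calls napi_schedule(&dhd->rx_napi_struct) to drain rx_napi_queue.
         */
        schedule_work(&dhd->rx_napi_dispatcher_work);
}
#endif /* 0 */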

#define DHDIF_FWDER(dhdif)      FALSE

/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string that keeps firmware, chip, and chip-revision version info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);

#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */

extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */

/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif  /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);

/* Watchdog interval */

/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */


uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));

#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

/* Functions to manage sysfs interface for dhd */
static int dhd_sysfs_init(dhd_info_t *dhd);
static void dhd_sysfs_exit(dhd_info_t *dhd);

#if defined(DHD_LB)

static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
        /* Default CPU allocation for the jobs */
        atomic_set(&dhd->rx_napi_cpu, 1);
        atomic_set(&dhd->rx_compl_cpu, 2);
        atomic_set(&dhd->tx_compl_cpu, 2);
}

static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
        free_cpumask_var(dhd->cpumask_curr_avail);
        free_cpumask_var(dhd->cpumask_primary);
        free_cpumask_var(dhd->cpumask_primary_new);
        free_cpumask_var(dhd->cpumask_secondary);
        free_cpumask_var(dhd->cpumask_secondary_new);
}

static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
        int id;
        uint32 cpus;
        int ret = 0;

        if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
                !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
                DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
                ret = -ENOMEM;
                goto fail;
        }

        cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
        cpumask_clear(dhd->cpumask_primary);
        cpumask_clear(dhd->cpumask_secondary);

        cpus = DHD_LB_PRIMARY_CPUS;
        for (id = 0; id < NR_CPUS; id++) {
                if (isset(&cpus, id))
                        cpumask_set_cpu(id, dhd->cpumask_primary);
        }

        cpus = DHD_LB_SECONDARY_CPUS;
        for (id = 0; id < NR_CPUS; id++) {
                if (isset(&cpus, id))
                        cpumask_set_cpu(id, dhd->cpumask_secondary);
        }

        return ret;
fail:
        dhd_cpumasks_deinit(dhd);
        return ret;
}

/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups:
 *  Primary Set - A CPU mask that carries the First Choice CPUs.
 *  Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of jobs that need to be assigned to the CPUs,
 * from one of the above mentioned CPU groups:
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, Rx) - compl_cpu
 *
 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line, the CPU candidacy algorithm is triggered. The candidacy
 * algorithm tries to pick the first available non-boot CPU (i.e. not CPU0)
 * for napi_cpu. If more processors are free, it assigns one to compl_cpu.
 * It also tries, as far as possible, to keep napi_cpu and compl_cpu off the
 * same CPU.
 *
 * By design, both Tx and Rx completion jobs run on the same CPU core, as that
 * allows Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts could have been serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
        uint32 primary_available_cpus; /* count of primary available cpus */
        uint32 secondary_available_cpus; /* count of secondary available cpus */
        uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
        uint32 compl_cpu = 0; /* cpu selected for completion jobs */

        cpumask_clear(dhd->cpumask_primary_new);
        cpumask_clear(dhd->cpumask_secondary_new);

        /*
         * Now select from the primary mask. Even if a Job is
         * already running on a CPU in secondary group, we still move
         * to primary CPU. So no conditional checks.
         */
        cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
                dhd->cpumask_curr_avail);

        cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
                dhd->cpumask_curr_avail);

        primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

        if (primary_available_cpus > 0) {
                napi_cpu = cpumask_first(dhd->cpumask_primary_new);

                /* If no further CPU is available,
                 * cpumask_next returns >= nr_cpu_ids
                 */
                compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
                if (compl_cpu >= nr_cpu_ids)
                        compl_cpu = 0;
        }

        DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
                __FUNCTION__, napi_cpu, compl_cpu));

        /* -- Now check for the CPUs from the secondary mask -- */
        secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

        DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
                __FUNCTION__, secondary_available_cpus, nr_cpu_ids));

        if (secondary_available_cpus > 0) {
                /* At this point if napi_cpu is unassigned it means no CPU
                 * is online from Primary Group
                 */
                if (napi_cpu == 0) {
                        napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
                        compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
                } else if (compl_cpu == 0) {
                        compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
                }

                /* If no CPU was available for completion, choose CPU 0 */
                if (compl_cpu >= nr_cpu_ids)
                        compl_cpu = 0;
        }
        if ((primary_available_cpus == 0) &&
                (secondary_available_cpus == 0)) {
                /* No CPUs available from primary or secondary mask */
                napi_cpu = 0;
                compl_cpu = 0;
        }

        DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
                __FUNCTION__, napi_cpu, compl_cpu));
        ASSERT(napi_cpu < nr_cpu_ids);
        ASSERT(compl_cpu < nr_cpu_ids);

        atomic_set(&dhd->rx_napi_cpu, napi_cpu);
        atomic_set(&dhd->tx_compl_cpu, compl_cpu);
        atomic_set(&dhd->rx_compl_cpu, compl_cpu);
        return;
}
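
/*
 * Worked example, traced from the code above with the default masks: with
 * DHD_LB_PRIMARY_CPUS 0x0 and DHD_LB_SECONDARY_CPUS 0xFE on a quad-core
 * system with CPUs 0-3 online, the effective primary mask is empty and the
 * effective secondary mask is {1, 2, 3}; the algorithm therefore settles on
 * napi_cpu = 1 and compl_cpu = 2, leaving the boot CPU free.
 */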

/*
 * Function to handle CPU hotplug notifications. One of the tasks it
 * performs is to trigger the CPU candidacy algorithm for load balancing.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;

        dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);

        switch (action)
        {
                case CPU_ONLINE:
                        DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
                        cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
                        dhd_select_cpu_candidacy(dhd);
                        break;

                case CPU_DOWN_PREPARE:
                case CPU_DOWN_PREPARE_FROZEN:
                        DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
                        cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
                        dhd_select_cpu_candidacy(dhd);
                        break;
                default:
                        break;
        }

        return NOTIFY_OK;
}

#if defined(DHD_LB_STATS)
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
        dhd_info_t *dhd;
        int i, j;

        if (dhdp == NULL) {
                DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
                        __FUNCTION__));
                return;
        }

        dhd = dhdp->info;
        if (dhd == NULL) {
                DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
                return;
        }

        DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
        DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
        DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
        DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

        for (i = 0; i < NR_CPUS; i++) {
                DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
                DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
                DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);

                DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
                DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
        }

        for (i = 0; i < NR_CPUS; i++) {
                for (j = 0; j < HIST_BIN_SIZE; j++) {
                        DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
                        DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
                        DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
                }
        }

        return;
}

static void dhd_lb_stats_dump_histo(
        struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
{
        int i, j;
        uint32 per_cpu_total[NR_CPUS] = {0};
        uint32 total = 0;

        bcm_bprintf(strbuf, "CPU: \t\t");
        for (i = 0; i < num_possible_cpus(); i++)
                bcm_bprintf(strbuf, "%d\t", i);
        bcm_bprintf(strbuf, "\nBin\n");

        for (i = 0; i < HIST_BIN_SIZE; i++) {
                bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
                for (j = 0; j < num_possible_cpus(); j++) {
                        bcm_bprintf(strbuf, "%d\t", hist[j][i]);
                }
                bcm_bprintf(strbuf, "\n");
        }
        bcm_bprintf(strbuf, "Per CPU Total \t");
        total = 0;
        for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < HIST_BIN_SIZE; j++) {
                        per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
                }
                bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
                total += per_cpu_total[i];
        }
        bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);

        return;
}

static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
        int i;

        bcm_bprintf(strbuf, "CPU: \t");
        for (i = 0; i < num_possible_cpus(); i++)
                bcm_bprintf(strbuf, "%d\t", i);
        bcm_bprintf(strbuf, "\n");

        bcm_bprintf(strbuf, "Val: \t");
        for (i = 0; i < num_possible_cpus(); i++)
                bcm_bprintf(strbuf, "%u\t", *(p+i));
        bcm_bprintf(strbuf, "\n");
        return;
}

void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
        dhd_info_t *dhd;

        if (dhdp == NULL || strbuf == NULL) {
                DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
                        __FUNCTION__, dhdp, strbuf));
                return;
        }

        dhd = dhdp->info;
        if (dhd == NULL) {
                DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
                return;
        }

        bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

        bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

        bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
                dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
                dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
        bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
        bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
        bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
        dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
        bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
        dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}

static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
{
        uint32 bin_power;
        uint32 *p = NULL;

        bin_power = next_larger_power2(count);

        switch (bin_power) {
                case   0: break;
                case   1: /* Fall through intentionally */
                case   2: p = bin + 0; break;
                case   4: p = bin + 1; break;
                case   8: p = bin + 2; break;
                case  16: p = bin + 3; break;
                case  32: p = bin + 4; break;
                case  64: p = bin + 5; break;
                case 128: p = bin + 6; break;
                default : p = bin + 7; break;
        }
        if (p)
                *p = *p + 1;
        return;
}
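
/*
 * Worked example, mirroring the napi_rx_hist comment in dhd_info_t: a run
 * that processed 10 packets rounds up via next_larger_power2(10) == 16 and
 * increments bin + 3 (the "2^4" bucket); a run of 30 packets rounds up to
 * 32 and increments bin + 4 (the "2^5" bucket).
 */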

extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
        int cpu;
        dhd_info_t *dhd = dhdp->info;

        cpu = get_cpu();
        put_cpu();
        dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);

        return;
}

extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
        int cpu;
        dhd_info_t *dhd = dhdp->info;

        cpu = get_cpu();
        put_cpu();
        dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);

        return;
}

extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
        int cpu;
        dhd_info_t *dhd = dhdp->info;

        cpu = get_cpu();
        put_cpu();
        dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);

        return;
}

extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
        dhd_info_t *dhd = dhdp->info;
        DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
        dhd_info_t *dhd = dhdp->info;
        DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}

#endif /* DHD_LB_STATS */
#endif /* DHD_LB */


#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* The DHD perimeter lock is only used in routers with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
        spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
        spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
        struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern int dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

extern char dhd_version[];
extern char fw_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef WLMEDIA_HTSF
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);


#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
                             wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
        int ret = NOTIFY_DONE;
        bool suspend = FALSE;
        dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

        BCM_REFERENCE(dhdinfo);

        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                suspend = TRUE;
                break;

        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                suspend = FALSE;
                break;
        }

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
        if (suspend) {
                DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
                dhd_wlfc_suspend(&dhdinfo->pub);
                DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
        } else
                dhd_wlfc_resume(&dhdinfo->pub);
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
        KERNEL_VERSION(2, 6, 39))
        dhd_mmc_suspend = suspend;
        smp_mb();
#endif

        return ret;
}

/* Ensure this notifier is registered only once; registering it twice would
 * corrupt the kernel notifier linked list (the entry's 'next' would point to
 * itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
        dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
        dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
        int          ifidx; /* interface index */
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)

/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
        dhd_dev_priv_t * dev_priv;
        ASSERT(dev != (struct net_device *)NULL);
        dev_priv = DHD_DEV_PRIV(dev);
        dev_priv->dhd = (dhd_info_t *)NULL;
        dev_priv->ifp = (dhd_if_t *)NULL;
        dev_priv->ifidx = DHD_BAD_IF;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
                  int ifidx)
{
        dhd_dev_priv_t * dev_priv;
        ASSERT(dev != (struct net_device *)NULL);
        dev_priv = DHD_DEV_PRIV(dev);
        dev_priv->dhd = dhd;
        dev_priv->ifp = ifp;
        dev_priv->ifidx = ifidx;
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Using them removes branch conditionals from hot paths and helps
 * instruction-level parallelism (dual issue, fewer load shadows, better use
 * of deep pipelines).
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
        .fwdh = FWDER_NULL,
#endif
        .pub = {
                 .info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
                 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
                 .up = FALSE,
                 .busstate = DHD_BUS_DOWN
        }
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL  (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
        .reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
        .fwdh = FWDER_NULL,
#endif
#ifdef WMF
        .wmf = { .wmf_enable = TRUE },
#endif
        .info = DHD_INFO_NULL,
        .net = DHD_NET_DEV_NULL,
        .idx = DHD_BAD_IF
};
#define DHD_IF_NULL  (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

1545 /** Interface STA list management. */
1546
1547 /** Fetch the dhd_if object, given the interface index in the dhd. */
1548 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1549
1550 /** Alloc/Free a dhd_sta object from the dhd instance's sta_pool. */
1551 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1552 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1553
1554 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1555 static void dhd_if_del_sta_list(dhd_if_t * ifp);
1556 static void     dhd_if_flush_sta(dhd_if_t * ifp);
1557
1558 /* Construct/Destruct a sta pool. */
1559 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1560 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1561 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1562 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1563
1564
1565 /* Return interface pointer */
1566 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1567 {
1568         ASSERT(ifidx < DHD_MAX_IFS);
1569
1570         if (ifidx >= DHD_MAX_IFS)
1571                 return NULL;
1572
1573         return dhdp->info->iflist[ifidx];
1574 }
1575
1576 /** Reset a dhd_sta object and free into the dhd pool. */
1577 static void
1578 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1579 {
1580         int prio;
1581
1582         ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1583
1584         ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1585
1586         /*
1587          * Flush and free all packets queued in the flow rings belonging to sta.
1588          * Packets already posted to the flow rings themselves are flushed later.
1589          */
1590         for (prio = 0; prio < (int)NUMPRIO; prio++) {
1591                 uint16 flowid = sta->flowid[prio];
1592
1593                 if (flowid != FLOWID_INVALID) {
1594                         unsigned long flags;
1595                         flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
1596                         flow_ring_node_t * flow_ring_node;
1597
1598 #ifdef DHDTCPACK_SUPPRESS
1599                         /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1600                          * when there is a newly coming packet from network stack.
1601                          */
1602                         dhd_tcpack_info_tbl_clean(dhdp);
1603 #endif /* DHDTCPACK_SUPPRESS */
1604
1605                         flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1606                         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1607                         flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1608
1609                         if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1610                                 void * pkt;
1611                                 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
1612                                         PKTFREE(dhdp->osh, pkt, TRUE);
1613                                 }
1614                         }
1615
1616                         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1617                         ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1618                 }
1619
1620                 sta->flowid[prio] = FLOWID_INVALID;
1621         }
1622
1623         id16_map_free(dhdp->staid_allocator, sta->idx);
1624         DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1625         sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1626         sta->ifidx = DHD_BAD_IF;
1627         bzero(sta->ea.octet, ETHER_ADDR_LEN);
1628         INIT_LIST_HEAD(&sta->list);
1629         sta->idx = ID16_INVALID; /* implying free */
1630 }
1631
1632 /** Allocate a dhd_sta object from the dhd pool. */
1633 static dhd_sta_t *
1634 dhd_sta_alloc(dhd_pub_t * dhdp)
1635 {
1636         uint16 idx;
1637         dhd_sta_t * sta;
1638         dhd_sta_pool_t * sta_pool;
1639
1640         ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1641
1642         idx = id16_map_alloc(dhdp->staid_allocator);
1643         if (idx == ID16_INVALID) {
1644                 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1645                 return DHD_STA_NULL;
1646         }
1647
1648         sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1649         sta = &sta_pool[idx];
1650
1651         ASSERT((sta->idx == ID16_INVALID) &&
1652                (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1653
1654         DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1655
1656         sta->idx = idx; /* implying allocated */
1657
1658         return sta;
1659 }
1660
1661 /** Delete all STAs in an interface's STA list. */
1662 static void
1663 dhd_if_del_sta_list(dhd_if_t *ifp)
1664 {
1665         dhd_sta_t *sta, *next;
1666         unsigned long flags;
1667
1668         DHD_IF_STA_LIST_LOCK(ifp, flags);
1669
1670         list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1671 #if defined(BCM_GMAC3)
1672                 if (ifp->fwdh) {
1673                         /* Remove sta from WOFA forwarder. */
1674                         fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1675                 }
1676 #endif /* BCM_GMAC3 */
1677                 list_del(&sta->list);
1678                 dhd_sta_free(&ifp->info->pub, sta);
1679         }
1680
1681         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1682
1683         return;
1684 }
1685
1686 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1687 static void
1688 dhd_if_flush_sta(dhd_if_t * ifp)
1689 {
1690 #if defined(BCM_GMAC3)
1691
1692         if (ifp && (ifp->fwdh != FWDER_NULL)) {
1693                 dhd_sta_t *sta, *next;
1694                 unsigned long flags;
1695
1696                 DHD_IF_STA_LIST_LOCK(ifp, flags);
1697
1698                 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1699                         /* Remove any sta entry from WOFA forwarder. */
1700                         fwder_flush(ifp->fwdh, (wofa_t)sta);
1701                 }
1702
1703                 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1704         }
1705 #endif /* BCM_GMAC3 */
1706 }
1707
1708 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1709 static int
1710 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1711 {
1712         int idx, prio, sta_pool_memsz;
1713         dhd_sta_t * sta;
1714         dhd_sta_pool_t * sta_pool;
1715         void * staid_allocator;
1716
1717         ASSERT(dhdp != (dhd_pub_t *)NULL);
1718         ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1719
1720         /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1721         staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1722         if (staid_allocator == NULL) {
1723                 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1724                 return BCME_ERROR;
1725         }
1726
1727         /* Pre-allocate a pool of dhd_sta objects (one extra). */
1728         sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1729         sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1730         if (sta_pool == NULL) {
1731                 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1732                 id16_map_fini(dhdp->osh, staid_allocator);
1733                 return BCME_ERROR;
1734         }
1735
1736         dhdp->sta_pool = sta_pool;
1737         dhdp->staid_allocator = staid_allocator;
1738
1739         /* Initialize all sta(s) for the pre-allocated free pool. */
1740         bzero((uchar *)sta_pool, sta_pool_memsz);
1741         for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1742                 sta = &sta_pool[idx];
1743                 sta->idx = id16_map_alloc(staid_allocator);
1744                 ASSERT(sta->idx <= max_sta);
1745         }
1746         /* Now place them into the pre-allocated free pool. */
1747         for (idx = 1; idx <= max_sta; idx++) {
1748                 sta = &sta_pool[idx];
1749                 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1750                         sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1751                 }
1752                 dhd_sta_free(dhdp, sta);
1753         }
1754
1755         return BCME_OK;
1756 }
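
/* Pool lifecycle sketch (illustrative): the id16 allocator hands out indices
 * 1..max_sta (index 0 is reserved), and each index names a slot of sta_pool,
 * which is why (max_sta + 1) slots are allocated above. Assuming a caller
 * with some MAX_STA limit of its own:
 *
 *      if (dhd_sta_pool_init(dhdp, MAX_STA) == BCME_OK) {
 *              dhd_sta_t *sta = dhd_sta_alloc(dhdp); // sta->idx in 1..MAX_STA
 *              ...
 *              dhd_sta_free(dhdp, sta);              // back into the free pool
 *              dhd_sta_pool_fini(dhdp, MAX_STA);
 *      }
 */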
1757
1758 /** Destruct the pool of dhd_sta_t objects.
1759  * Caller must ensure that no STA objects are currently associated with an if.
1760  */
1761 static void
1762 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1763 {
1764         dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1765
1766         if (sta_pool) {
1767                 int idx;
1768                 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1769                 for (idx = 1; idx <= max_sta; idx++) {
1770                         ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1771                         ASSERT(sta_pool[idx].idx == ID16_INVALID);
1772                 }
1773                 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1774                 dhdp->sta_pool = NULL;
1775         }
1776
1777         id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1778         dhdp->staid_allocator = NULL;
1779 }
1780
1781 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1782 static void
1783 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1784 {
1785         int idx, prio, sta_pool_memsz;
1786         dhd_sta_t * sta;
1787         dhd_sta_pool_t * sta_pool;
1788         void *staid_allocator;
1789
1790         if (!dhdp) {
1791                 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1792                 return;
1793         }
1794
1795         sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1796         staid_allocator = dhdp->staid_allocator;
1797
1798         if (!sta_pool) {
1799                 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1800                 return;
1801         }
1802
1803         if (!staid_allocator) {
1804                 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1805                 return;
1806         }
1807
1808         /* clear free pool */
1809         sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1810         bzero((uchar *)sta_pool, sta_pool_memsz);
1811
1812         /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1813         id16_map_clear(staid_allocator, max_sta, 1);
1814
1815         /* Initialize all sta(s) for the pre-allocated free pool. */
1816         for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1817                 sta = &sta_pool[idx];
1818                 sta->idx = id16_map_alloc(staid_allocator);
1819                 ASSERT(sta->idx <= max_sta);
1820         }
1821         /* Now place them into the pre-allocated free pool. */
1822         for (idx = 1; idx <= max_sta; idx++) {
1823                 sta = &sta_pool[idx];
1824                 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1825                         sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1826                 }
1827                 dhd_sta_free(dhdp, sta);
1828         }
1829 }
1830
1831 /** Find STA with MAC address ea in an interface's STA list. */
1832 dhd_sta_t *
1833 dhd_find_sta(void *pub, int ifidx, void *ea)
1834 {
1835         dhd_sta_t *sta;
1836         dhd_if_t *ifp;
1837         unsigned long flags;
1838
1839         ASSERT(ea != NULL);
1840         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1841         if (ifp == NULL)
1842                 return DHD_STA_NULL;
1843
1844         DHD_IF_STA_LIST_LOCK(ifp, flags);
1845
1846         list_for_each_entry(sta, &ifp->sta_list, list) {
1847                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1848                         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1849                         return sta;
1850                 }
1851         }
1852
1853         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1854
1855         return DHD_STA_NULL;
1856 }
1857
1858 /** Add STA into the interface's STA list. */
1859 dhd_sta_t *
1860 dhd_add_sta(void *pub, int ifidx, void *ea)
1861 {
1862         dhd_sta_t *sta;
1863         dhd_if_t *ifp;
1864         unsigned long flags;
1865
1866         ASSERT(ea != NULL);
1867         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1868         if (ifp == NULL)
1869                 return DHD_STA_NULL;
1870
1871         sta = dhd_sta_alloc((dhd_pub_t *)pub);
1872         if (sta == DHD_STA_NULL) {
1873                 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1874                 return DHD_STA_NULL;
1875         }
1876
1877         memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1878
1879         /* link the sta and the dhd interface */
1880         sta->ifp = ifp;
1881         sta->ifidx = ifidx;
1882         INIT_LIST_HEAD(&sta->list);
1883
1884         DHD_IF_STA_LIST_LOCK(ifp, flags);
1885
1886         list_add_tail(&sta->list, &ifp->sta_list);
1887
1888 #if defined(BCM_GMAC3)
1889         if (ifp->fwdh) {
1890                 ASSERT(ISALIGNED(ea, 2));
1891                 /* Add sta to WOFA forwarder. */
1892                 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1893         }
1894 #endif /* BCM_GMAC3 */
1895
1896         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1897
1898         return sta;
1899 }
1900
1901 /** Delete STA from the interface's STA list. */
1902 void
1903 dhd_del_sta(void *pub, int ifidx, void *ea)
1904 {
1905         dhd_sta_t *sta, *next;
1906         dhd_if_t *ifp;
1907         unsigned long flags;
1908
1909         ASSERT(ea != NULL);
1910         ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1911         if (ifp == NULL)
1912                 return;
1913
1914         DHD_IF_STA_LIST_LOCK(ifp, flags);
1915
1916         list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1917                 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1918 #if defined(BCM_GMAC3)
1919                         if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1920                                 ASSERT(ISALIGNED(ea, 2));
1921                                 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1922                         }
1923 #endif /* BCM_GMAC3 */
1924                         list_del(&sta->list);
1925                         dhd_sta_free(&ifp->info->pub, sta);
1926                 }
1927         }
1928
1929         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1930 #ifdef DHD_L2_FILTER
1931         if (ifp->parp_enable) {
1932                 /* clear Proxy ARP cache of specific Ethernet Address */
1933                 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1934                         ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1935         }
1936 #endif /* DHD_L2_FILTER */
1937         return;
1938 }
1939
1940 /** Add STA if it doesn't exist. Not reentrant. */
1941 dhd_sta_t*
1942 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1943 {
1944         dhd_sta_t *sta;
1945
1946         sta = dhd_find_sta(pub, ifidx, ea);
1947
1948         if (!sta) {
1949                 /* Add entry */
1950                 sta = dhd_add_sta(pub, ifidx, ea);
1951         }
1952
1953         return sta;
1954 }
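
/* Typical call pattern (sketch): the assoc/data path looks up or creates the
 * STA entry for a peer MAC, and the disassoc path tears it down:
 *
 *      dhd_sta_t *sta = dhd_findadd_sta(dhdp, ifidx, ea);
 *      ...
 *      dhd_del_sta(dhdp, ifidx, ea);
 *
 * Because dhd_findadd_sta is not reentrant, two concurrent callers that both
 * miss in dhd_find_sta could each add an entry for the same MAC, so callers
 * need external serialization.
 */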
1955
1956 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1957 #if !defined(BCM_GMAC3)
1958 static struct list_head *
1959 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1960 {
1961         unsigned long flags;
1962         dhd_sta_t *sta, *snapshot;
1963
1964         INIT_LIST_HEAD(snapshot_list);
1965
1966         DHD_IF_STA_LIST_LOCK(ifp, flags);
1967
1968         list_for_each_entry(sta, &ifp->sta_list, list) {
1969                 /* allocate one and add to snapshot */
1970                 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1971                 if (snapshot == NULL) {
1972                         DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1973                         continue;
1974                 }
1975
1976                 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1977
1978                 INIT_LIST_HEAD(&snapshot->list);
1979                 list_add_tail(&snapshot->list, snapshot_list);
1980         }
1981
1982         DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1983
1984         return snapshot_list;
1985 }
1986
1987 static void
1988 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1989 {
1990         dhd_sta_t *sta, *next;
1991
1992         list_for_each_entry_safe(sta, next, snapshot_list, list) {
1993                 list_del(&sta->list);
1994                 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1995         }
1996 }
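
/* The snapshot pair above lets a caller walk an interface's STA list without
 * holding DHD_IF_STA_LIST_LOCK across the walk (useful when per-station work
 * may block, e.g. sending a unicast copy of a query). Sketch:
 *
 *      struct list_head snapshot;
 *      dhd_sta_t *sta;
 *      dhd_sta_list_snapshot(dhd, ifp, &snapshot);
 *      list_for_each_entry(sta, &snapshot, list)
 *              ...use sta->ea with no lock held...
 *      dhd_sta_list_snapshot_free(dhd, &snapshot);
 *
 * Note that only the MAC address is copied into each snapshot entry, so the
 * snapshot cannot be used to reach back into the live dhd_sta_t state.
 */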
1997 #endif /* !BCM_GMAC3 */
1998 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1999
2000 #else
2001 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
2002 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2003 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2004 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2005 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2006 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2007 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2008 #endif /* PCIE_FULL_DONGLE */
2009
2010
2011 #if defined(DHD_LB)
2012
2013 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2014 /**
2015  * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2016  * CPU and schedules a tasklet.
2017  * @tasklet: opaque pointer to the tasklet
2018  */
2019 static INLINE void
2020 dhd_tasklet_schedule(void *tasklet)
2021 {
2022         tasklet_schedule((struct tasklet_struct *)tasklet);
2023 }
2024
2025 /**
2026  * dhd_tasklet_schedule_on - Schedule the passed tasklet on a given CPU
2027  * @tasklet: tasklet to be scheduled
2028  * @on_cpu: cpu core id
2029  *
2030  * If the requested cpu is online, an IPI is sent to it via
2031  * smp_call_function_single (without waiting), and dhd_tasklet_schedule is
2032  * invoked there to schedule the specified tasklet on that CPU.
2033  */
2034 static void
2035 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2036 {
2037         const int wait = 0;
2038         smp_call_function_single(on_cpu,
2039                 dhd_tasklet_schedule, (void *)tasklet, wait);
2040 }
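
/* The two helpers above form a small IPI trampoline: smp_call_function_single
 * runs dhd_tasklet_schedule in interrupt context on the destination CPU, and
 * tasklet_schedule there queues the tasklet into that CPU's softirq. With
 * wait == 0 the caller does not block on the remote call:
 *
 *      dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); // async kick
 *
 * The caller is responsible for making sure cpu is online; the dispatcher
 * work functions below do this under get_online_cpus/put_online_cpus.
 */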
2041 #endif /* DHD_LB_TXC || DHD_LB_RXC */
2042
2043
2044 #if defined(DHD_LB_TXC)
2045 /**
2046  * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2047  * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2048  * freeing the packets placed in the tx_compl workq
2049  */
2050 void
2051 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2052 {
2053         dhd_info_t *dhd = dhdp->info;
2054         int curr_cpu, on_cpu;
2055
2056         if (dhd->rx_napi_netdev == NULL) {
2057                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2058                 return;
2059         }
2060
2061         DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2062         /*
2063          * If the destination CPU is NOT online or is the same as the current
2064          * CPU, there is no need to schedule the work.
2065          */
2066         curr_cpu = get_cpu();
2067         put_cpu();
2068
2069         on_cpu = atomic_read(&dhd->tx_compl_cpu);
2070
2071         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2072                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2073         } else {
2074                 schedule_work(&dhd->tx_compl_dispatcher_work);
2075         }
2076 }
2077
2078 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2079 {
2080         struct dhd_info *dhd =
2081                 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2082         int cpu;
2083
2084         get_online_cpus();
2085         cpu = atomic_read(&dhd->tx_compl_cpu);
2086         if (!cpu_online(cpu))
2087                 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2088         else
2089                 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2090         put_online_cpus();
2091 }
2092
2093 #endif /* DHD_LB_TXC */
2094
2095
2096 #if defined(DHD_LB_RXC)
2097 /**
2098  * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2099  * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2100  * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2101  * placed in the rx_compl workq.
2102  *
2103  * @dhdp: pointer to dhd_pub object
2104  */
2105 void
2106 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2107 {
2108         dhd_info_t *dhd = dhdp->info;
2109         int curr_cpu, on_cpu;
2110
2111         if (dhd->rx_napi_netdev == NULL) {
2112                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2113                 return;
2114         }
2115
2116         DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2117         /*
2118          * If the destination CPU is NOT online or is the same as the current
2119          * CPU, there is no need to schedule the work.
2120          */
2121         curr_cpu = get_cpu();
2122         put_cpu();
2123
2124         on_cpu = atomic_read(&dhd->rx_compl_cpu);
2125
2126         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2127                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2128         } else {
2129                 schedule_work(&dhd->rx_compl_dispatcher_work);
2130         }
2131 }
2132
2133 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2134 {
2135         struct dhd_info *dhd =
2136                 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2137         int cpu;
2138
2139         get_online_cpus();
2140         cpu = atomic_read(&dhd->rx_compl_cpu);
2141         if (!cpu_online(cpu))
2142                 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2143         else
2144                 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2145         put_online_cpus();
2146 }
2147
2148 #endif /* DHD_LB_RXC */
2149
2150
2151 #if defined(DHD_LB_RXP)
2152 /**
2153  * dhd_napi_poll - Load balance napi poll function to process received
2154  * packets and send up the network stack using netif_receive_skb()
2155  *
2156  * @napi: napi object in which context this poll function is invoked
2157  * @budget: number of packets to be processed.
2158  *
2159  * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2160  * rx_napi_queue into a local rx_process_queue (lock, splice, unlock), then
2161  * dequeue each packet from the head of rx_process_queue, fetch the ifid from
2162  * the packet tag, and send it up.
2163  */
2164 static int
2165 dhd_napi_poll(struct napi_struct *napi, int budget)
2166 {
2167         int ifid;
2168         const int pkt_count = 1;
2169         const int chan = 0;
2170         struct sk_buff * skb;
2171         unsigned long flags;
2172         struct dhd_info *dhd;
2173         int processed = 0;
2174         struct sk_buff_head rx_process_queue;
2175
2176         dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2177         DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2178                 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2179
2180         __skb_queue_head_init(&rx_process_queue);
2181
2182         /* extract the entire rx_napi_queue into local rx_process_queue */
2183         spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2184         skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2185         spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2186
2187         while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2188                 OSL_PREFETCH(skb->data);
2189
2190                 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2191
2192                 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2193                         __FUNCTION__, skb, ifid));
2194
2195                 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2196                 processed++;
2197         }
2198
2199         DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2200
2201         DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2202         napi_complete(napi);
2203
2204         return budget - 1;
2205 }
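
/* Registration sketch (assumption; the actual hookup is done elsewhere in
 * this file): dhd_napi_poll is the poll callback bound to the rx napi
 * context, along the lines of:
 *
 *      netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
 *              dhd_napi_poll, DHD_NAPI_WEIGHT);
 *      napi_enable(&dhd->rx_napi_struct);
 *
 * where DHD_NAPI_WEIGHT stands for whatever napi budget the driver
 * configures.
 */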
2206
2207 /**
2208  * dhd_napi_schedule - Place the napi struct into the current CPU's softnet
2209  * napi poll list. This function may be invoked via smp_call_function_single
2210  * from a remote CPU.
2211  *
2212  * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2213  * after the napi_struct is added to the softnet data's poll_list.
2214  *
2215  * @info: pointer to a dhd_info struct
2216  */
2217 static void
2218 dhd_napi_schedule(void *info)
2219 {
2220         dhd_info_t *dhd = (dhd_info_t *)info;
2221
2222         DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2223                 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2224
2225         /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2226         if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2227                 __napi_schedule(&dhd->rx_napi_struct);
2228                 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2229         }
2230
2231         /*
2232          * If the rx_napi_struct was already running, then we let it complete
2233          * processing all its packets. The rx_napi_struct may only run on one
2234          * core at a time, to avoid out-of-order handling.
2235          */
2236 }
2237
2238 /**
2239  * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2240  * action after placing the dhd's rx_process napi object in the remote CPU's
2241  * softnet data's poll_list.
2242  *
2243  * @dhd: dhd_info which has the rx_process napi object
2244  * @on_cpu: desired remote CPU id
2245  */
2246 static INLINE int
2247 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2248 {
2249         int wait = 0; /* asynchronous IPI */
2250
2251         DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2252                 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2253
2254         if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2255                 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2256                         __FUNCTION__, on_cpu));
2257         }
2258
2259         DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2260
2261         return 0;
2262 }
2263
2264 /*
2265  * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
2266  * Why is this needed?
2267  * The candidacy algorithm is run from the callback function registered with
2268  * the CPU hotplug notifier, and that callback runs in worker context.
2269  * dhd_napi_schedule_on also runs in worker context, so the two can run on
2270  * different CPUs at the same time. This leaves a window in which a given
2271  * CPUn is being brought down from CPUm while we try to run a function on
2272  * CPUn. To prevent this, it is better to execute the whole SMP function
2273  * call under get_online_cpus. This ensures that the hotplug mechanism does
2274  * not kick in until we are done dealing with online CPUs. If the hotplug
2275  * worker is already running, that is not a problem, because the candidacy
2276  * algorithm will then reflect the change in dhd->rx_napi_cpu.
2277  *
2278  * The code structure below is proposed in
2279  * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2280  * for the question
2281  * Q: I need to ensure that a particular cpu is not removed when there is
2282  *    some work specific to this cpu in progress.
2283  *
2284  * According to that documentation, calling get_online_cpus is NOT required
2285  * if we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn
2286  * can run from workqueue context, we have to call these functions.
2287  */
2290 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2291 {
2292         struct dhd_info *dhd =
2293                 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2294         int cpu;
2295
2296         get_online_cpus();
2297         cpu = atomic_read(&dhd->rx_napi_cpu);
2298         if (!cpu_online(cpu))
2299                 dhd_napi_schedule(dhd);
2300         else
2301                 dhd_napi_schedule_on(dhd, cpu);
2302         put_online_cpus();
2303 }
2304
2305 /**
2306  * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2307  * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2308  * the packets enqueued into the rx_napi_queue and sendup.
2309  * The producer's rx packet queue is appended to the rx_napi_queue before
2310  * dispatching the rx_napi_struct.
2311  */
2312 void
2313 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2314 {
2315         unsigned long flags;
2316         dhd_info_t *dhd = dhdp->info;
2317         int curr_cpu;
2318         int on_cpu;
2319
2320         if (dhd->rx_napi_netdev == NULL) {
2321                 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2322                 return;
2323         }
2324
2325         DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2326                 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2327
2328         /* append the producer's queue of packets to the napi's rx process queue */
2329         spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2330         skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2331         spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2332
2333         /*
2334          * If the destination CPU is NOT online or is the same as the current
2335          * CPU, there is no need to schedule the work.
2336          */
2337         curr_cpu = get_cpu();
2338         put_cpu();
2339
2340         on_cpu = atomic_read(&dhd->rx_napi_cpu);
2341
2342         if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2343                 dhd_napi_schedule(dhd);
2344         } else {
2345                 schedule_work(&dhd->rx_napi_dispatcher_work);
2346         }
2347 }
2348
2349 /**
2350  * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2351  */
2352 void
2353 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2354 {
2355         dhd_info_t *dhd = dhdp->info;
2356
2357         DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2358                 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2359         DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2360         __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2361 }
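
/* Producer/consumer flow (sketch, assuming enqueue and dispatch both run in
 * the same bus rx context): each packet is tagged with its interface index
 * and staged, then the napi dispatch is kicked once per batch:
 *
 *      dhd_lb_rx_pkt_enqueue(dhdp, skb, ifidx);  // per packet, lockless
 *      ...
 *      dhd_lb_rx_napi_dispatch(dhdp);            // splice + schedule napi
 *
 * The pend queue can be lockless because only the producer side touches it;
 * packets become visible to dhd_napi_poll only after the locked splice into
 * rx_napi_queue in dhd_lb_rx_napi_dispatch.
 */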
2362 #endif /* DHD_LB_RXP */
2363
2364 #endif /* DHD_LB */
2365
2366 static void dhd_memdump_work_handler(struct work_struct * work)
2367 {
2368         struct dhd_info *dhd =
2369                 container_of(work, struct dhd_info, dhd_memdump_work.work);
2370
2371         BCM_REFERENCE(dhd);
2372 #ifdef BCMPCIE
2373         dhd_prot_collect_memdump(&dhd->pub);
2374 #endif
2375 }
2376
2377
2378 /** Returns the dhd iflist index corresponding to the bssidx provided by apps */
2379 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2380 {
2381         dhd_if_t *ifp;
2382         dhd_info_t *dhd = dhdp->info;
2383         int i;
2384
2385         ASSERT(bssidx < DHD_MAX_IFS);
2386         ASSERT(dhdp);
2387
2388         for (i = 0; i < DHD_MAX_IFS; i++) {
2389                 ifp = dhd->iflist[i];
2390                 if (ifp && (ifp->bssidx == bssidx)) {
2391                         DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2392                                 ifp->name, bssidx, i));
2393                         break;
2394                 }
2395         }
2396         return i;
2397 }
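
/* Note: if no interface matches bssidx, the loop above falls through and
 * DHD_MAX_IFS is returned, so callers indexing iflist[] with the result must
 * bounds-check it.
 */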
2398
2399 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2400 {
2401         uint32 store_idx;
2402         uint32 sent_idx;
2403
2404         if (!skb) {
2405                 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2406                 return BCME_ERROR;
2407         }
2408
2409         dhd_os_rxflock(dhdp);
2410         store_idx = dhdp->store_idx;
2411         sent_idx = dhdp->sent_idx;
2412         if (dhdp->skbbuf[store_idx] != NULL) {
2413                 /* Make sure the previous packets are processed */
2414                 dhd_os_rxfunlock(dhdp);
2415 #ifdef RXF_DEQUEUE_ON_BUSY
2416                 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2417                         skb, store_idx, sent_idx));
2418                 return BCME_BUSY;
2419 #else /* RXF_DEQUEUE_ON_BUSY */
2420                 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2421                         skb, store_idx, sent_idx));
2422                 /* msleep was removed here; wait_event_timeout should be used
2423                  * if we want to give the rx frame thread a chance to run
2424                  */
2425 #if defined(WAIT_DEQUEUE)
2426                 OSL_SLEEP(1);
2427 #endif
2428                 return BCME_ERROR;
2429 #endif /* RXF_DEQUEUE_ON_BUSY */
2430         }
2431         DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2432                 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2433         dhdp->skbbuf[store_idx] = skb;
2434         dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2435         dhd_os_rxfunlock(dhdp);
2436
2437         return BCME_OK;
2438 }
2439
2440 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2441 {
2442         uint32 store_idx;
2443         uint32 sent_idx;
2444         void *skb;
2445
2446         dhd_os_rxflock(dhdp);
2447
2448         store_idx = dhdp->store_idx;
2449         sent_idx = dhdp->sent_idx;
2450         skb = dhdp->skbbuf[sent_idx];
2451
2452         if (skb == NULL) {
2453                 dhd_os_rxfunlock(dhdp);
2454                 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2455                         store_idx, sent_idx));
2456                 return NULL;
2457         }
2458
2459         dhdp->skbbuf[sent_idx] = NULL;
2460         dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2461
2462         DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2463                 skb, sent_idx));
2464
2465         dhd_os_rxfunlock(dhdp);
2466
2467         return skb;
2468 }
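
/* skbbuf is used as a single-producer/single-consumer ring: store_idx is
 * advanced only by dhd_rxf_enqueue and sent_idx only by dhd_rxf_dequeue, and
 * both wrap with "& (MAXSKBPEND - 1)", which is correct only while
 * MAXSKBPEND is a power of two. Worked example, assuming MAXSKBPEND == 32:
 *
 *      (31 + 1) & (32 - 1) == 0   // wraps from the last slot back to slot 0
 *      (13 + 1) & (32 - 1) == 14  // normal advance
 *
 * A full ring is detected by skbbuf[store_idx] != NULL rather than by
 * comparing the two indices.
 */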
2469
2470 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2471 {
2472         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2473
2474         if (prepost) { /* pre process */
2475                 dhd_read_macaddr(dhd);
2476         } else { /* post process */
2477                 dhd_write_macaddr(&dhd->pub.mac);
2478         }
2479
2480         return 0;
2481 }
2482
2483 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2484 static bool
2485 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2486 {
2487         bool _apply = FALSE;
2488         /* In case of IBSS mode, apply arp pkt filter */
2489         if (op_mode & DHD_FLAG_IBSS_MODE) {
2490                 _apply = TRUE;
2491                 goto exit;
2492         }
2493         /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2494         if ((dhd->arp_version == 1) &&
2495                 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2496                 _apply = TRUE;
2497                 goto exit;
2498         }
2499
2500 exit:
2501         return _apply;
2502 }
2503 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2504
2505 void dhd_set_packet_filter(dhd_pub_t *dhd)
2506 {
2507 #ifdef PKT_FILTER_SUPPORT
2508         int i;
2509
2510         DHD_TRACE(("%s: enter\n", __FUNCTION__));
2511         if (dhd_pkt_filter_enable) {
2512                 for (i = 0; i < dhd->pktfilter_count; i++) {
2513                         dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2514                 }
2515         }
2516 #endif /* PKT_FILTER_SUPPORT */
2517 }
2518
2519 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2520 {
2521 #ifdef PKT_FILTER_SUPPORT
2522         int i;
2523
2524         DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2525
2526         if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2527                 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2528                 return;
2529         }
2530         /* 1 - Enable packet filter, only allow unicast packet to send up */
2531         /* 0 - Disable packet filter */
2532         if (dhd_pkt_filter_enable && (!value ||
2533             (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2534         {
2535                 for (i = 0; i < dhd->pktfilter_count; i++) {
2536 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2537                         if (value && (i == DHD_ARP_FILTER_NUM) &&
2538                                 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2539                                 DHD_TRACE(("Do not turn on ARP white list pkt filter: "
2540                                         "val %d, cnt %d, op_mode 0x%x\n",
2541                                         value, i, dhd->op_mode));
2542                                 continue;
2543                         }
2544 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2545                         dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2546                                 value, dhd_master_mode);
2547                 }
2548         }
2549 #endif /* PKT_FILTER_SUPPORT */
2550 }
2551
2552 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2553 {
2554 #ifndef SUPPORT_PM2_ONLY
2555         int power_mode = PM_MAX;
2556 #endif /* SUPPORT_PM2_ONLY */
2557 #ifdef SUPPORT_SENSORHUB
2558         uint32 shub_msreq;
2559 #endif /* SUPPORT_SENSORHUB */
2560         /* wl_pkt_filter_enable_t       enable_parm; */
2561         char iovbuf[32];
2562         int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2563 #ifdef DHD_USE_EARLYSUSPEND
2564 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2565         int bcn_timeout = 0;
2566 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2567 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2568         int roam_time_thresh = 0;       /* (ms) */
2569 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2570 #ifndef ENABLE_FW_ROAM_SUSPEND
2571         uint roamvar = dhd->conf->roam_off_suspend;
2572 #endif /* ENABLE_FW_ROAM_SUSPEND */
2573 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2574         int bcn_li_bcn;
2575 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2576         uint nd_ra_filter = 0;
2577         int ret = 0;
2578 #endif /* DHD_USE_EARLYSUSPEND */
2579 #ifdef PASS_ALL_MCAST_PKTS
2580         struct dhd_info *dhdinfo;
2581         uint32 allmulti;
2582         uint i;
2583 #endif /* PASS_ALL_MCAST_PKTS */
2584 #ifdef DYNAMIC_SWOOB_DURATION
2585 #ifndef CUSTOM_INTR_WIDTH
2586 #define CUSTOM_INTR_WIDTH 100
2587         int intr_width = 0;
2588 #endif /* CUSTOM_INTR_WIDTH */
2589 #endif /* DYNAMIC_SWOOB_DURATION */
2590
2591         if (!dhd)
2592                 return -ENODEV;
2593
2594 #ifdef PASS_ALL_MCAST_PKTS
2595         dhdinfo = dhd->info;
2596 #endif /* PASS_ALL_MCAST_PKTS */
2597
2598         DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2599                 __FUNCTION__, value, dhd->in_suspend));
2600
2601         dhd_suspend_lock(dhd);
2602
2603 #ifdef CUSTOM_SET_CPUCORE
2604         DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2605         /* set specific cpucore */
2606         dhd_set_cpucore(dhd, TRUE);
2607 #endif /* CUSTOM_SET_CPUCORE */
2608 #ifndef SUPPORT_PM2_ONLY
2609         if (dhd->conf->pm >= 0)
2610                 power_mode = dhd->conf->pm;
2611 #endif /* SUPPORT_PM2_ONLY */
2612         if (dhd->up) {
2613                 if (value && dhd->in_suspend) {
2614 #ifdef PKT_FILTER_SUPPORT
2615                         dhd->early_suspended = 1;
2616 #endif
2617                         /* Kernel suspended */
2618                         DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
2619
2620 #ifdef SUPPORT_SENSORHUB
2621                         shub_msreq = 1;
2622                         if (dhd->info->shub_enable == 1) {
2623                                 bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
2624                                         iovbuf, sizeof(iovbuf));
2625                                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2626                                         iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2627                                         DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
2628                                                 __FUNCTION__, ret));
2629                                 }
2630                         }
2631 #endif /* SUPPORT_SENSORHUB */
2632
2633 #ifndef SUPPORT_PM2_ONLY
2634                         dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2635                                 sizeof(power_mode), TRUE, 0);
2636 #endif /* SUPPORT_PM2_ONLY */
2637
2638 #ifdef PKT_FILTER_SUPPORT
2639                         /* Enable packet filter,
2640                          * only allow unicast packet to send up
2641                          */
2642                         dhd_enable_packet_filter(1, dhd);
2643 #endif /* PKT_FILTER_SUPPORT */
2644
2645 #ifdef PASS_ALL_MCAST_PKTS
2646                         allmulti = 0;
2647                         bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2648                                 iovbuf, sizeof(iovbuf));
2649                         for (i = 0; i < DHD_MAX_IFS; i++) {
2650                                 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2651                                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2652                                                 sizeof(iovbuf), TRUE, i);
2653                         }
2654 #endif /* PASS_ALL_MCAST_PKTS */
2655
2656                         /* If DTIM skip is set up as default, force it to wake on
2657                          * every third DTIM for better power savings. Note that one
2658                          * side effect is a chance of missing BC/MC packets.
2659                          */
2660 #ifdef WLTDLS
2661                         /* Do not set bcn_li_dtim in WFD mode */
2662                         if (dhd->tdls_mode) {
2663                                 bcn_li_dtim = 0;
2664                         } else
2665 #endif /* WLTDLS */
2666                         bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2667                         bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2668                                 4, iovbuf, sizeof(iovbuf));
2669                         if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
2670                                 TRUE, 0) < 0)
2671                                         DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2672
2673 #ifdef DHD_USE_EARLYSUSPEND
2674 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2675                         bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2676                         bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2677                                 4, iovbuf, sizeof(iovbuf));
2678                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2679 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2680 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2681                         roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2682                         bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2683                                 4, iovbuf, sizeof(iovbuf));
2684                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2685 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2686 #ifndef ENABLE_FW_ROAM_SUSPEND
2687                         /* Disable firmware roaming during suspend */
2688                         bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2689                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2690 #endif /* ENABLE_FW_ROAM_SUSPEND */
2691 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2692                         bcn_li_bcn = 0;
2693                         bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2694                                 4, iovbuf, sizeof(iovbuf));
2695                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2696 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2697                         if (FW_SUPPORTED(dhd, ndoe)) {
2698                                 /* enable IPv6 RA filter in firmware during suspend */
2699                                 nd_ra_filter = 1;
2700                                 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2701                                         iovbuf, sizeof(iovbuf));
2702                                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2703                                         sizeof(iovbuf), TRUE, 0)) < 0)
2704                                         DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2705                                                 ret));
2706                         }
2707 #ifdef DYNAMIC_SWOOB_DURATION
2708                         intr_width = CUSTOM_INTR_WIDTH;
2709                         bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2710                                 iovbuf, sizeof(iovbuf));
2711                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2712                                         sizeof(iovbuf), TRUE, 0)) < 0) {
2713                                 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2714                         }
2715 #endif /* DYNAMIC_SWOOB_DURATION */
2716 #endif /* DHD_USE_EARLYSUSPEND */
2717                 } else {
2718 #ifdef PKT_FILTER_SUPPORT
2719                         dhd->early_suspended = 0;
2720 #endif
2721                         /* Kernel resumed  */
2722                         DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
2723
2724 #ifdef SUPPORT_SENSORHUB
2725                         shub_msreq = 0;
2726                         if (dhd->info->shub_enable == 1) {
2727                                 bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
2728                                         4, iovbuf, sizeof(iovbuf));
2729                                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2730                                         iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2731                                                 DHD_ERROR(("%s Sensor Hub move/stop stop: "
2732                                                         "failed %d\n", __FUNCTION__, ret));
2733                                 }
2734                         }
2735 #endif /* SUPPORT_SENSORHUB */
2736
2737
2738 #ifdef DYNAMIC_SWOOB_DURATION
2739                         intr_width = 0;
2740                         bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2741                                 iovbuf, sizeof(iovbuf));
2742                         if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2743                                         sizeof(iovbuf), TRUE, 0)) < 0) {
2744                                 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2745                         }
2746 #endif /* DYNAMIC_SWOOB_DURATION */
2747 #ifndef SUPPORT_PM2_ONLY
2748                         power_mode = PM_FAST;
2749                         dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2750                                 sizeof(power_mode), TRUE, 0);
2751 #endif /* SUPPORT_PM2_ONLY */
2752 #ifdef PKT_FILTER_SUPPORT
2753                         /* disable pkt filter */
2754                         dhd_enable_packet_filter(0, dhd);
2755 #endif /* PKT_FILTER_SUPPORT */
2756 #ifdef PASS_ALL_MCAST_PKTS
2757                         allmulti = 1;
2758                         bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2759                                 iovbuf, sizeof(iovbuf));
2760                         for (i = 0; i < DHD_MAX_IFS; i++) {
2761                                 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2762                                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2763                                                 sizeof(iovbuf), TRUE, i);
2764                         }
2765 #endif /* PASS_ALL_MCAST_PKTS */
2766
2767                         /* restore pre-suspend setting for dtim_skip */
2768                         bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2769                                 4, iovbuf, sizeof(iovbuf));
2770
2771                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2772 #ifdef DHD_USE_EARLYSUSPEND
2773 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2774                         bcn_timeout = CUSTOM_BCN_TIMEOUT;
2775                         bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2776                                 4, iovbuf, sizeof(iovbuf));
2777                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2778 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2779 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2780                         roam_time_thresh = 2000;
2781                         bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2782                                 4, iovbuf, sizeof(iovbuf));
2783                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2784 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2785 #ifndef ENABLE_FW_ROAM_SUSPEND
2786                         roamvar = dhd_roam_disable;
2787                         bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2788                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2789 #endif /* ENABLE_FW_ROAM_SUSPEND */
2790 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2791                         bcn_li_bcn = 1;
2792                         bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2793                                 4, iovbuf, sizeof(iovbuf));
2794                         dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2795 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2796                         if (FW_SUPPORTED(dhd, ndoe)) {
2797                                 /* disable IPv6 RA filter in firmware on resume */
2798                                 nd_ra_filter = 0;
2799                                 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2800                                         iovbuf, sizeof(iovbuf));
2801                                 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2802                                         sizeof(iovbuf), TRUE, 0)) < 0)
2803                                         DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2804                                                 ret));
2805                         }
2806 #endif /* DHD_USE_EARLYSUSPEND */
2807                 }
2808         }
2809         dhd_suspend_unlock(dhd);
2810
2811         return 0;
2812 }
2813
2814 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2815 {
2816         dhd_pub_t *dhdp = &dhd->pub;
2817         int ret = 0;
2818
2819         DHD_OS_WAKE_LOCK(dhdp);
2820         DHD_PERIM_LOCK(dhdp);
2821
2822         /* Set flag when early suspend was called */
2823         dhdp->in_suspend = val;
2824         if ((force || !dhdp->suspend_disable_flag) &&
2825                 dhd_support_sta_mode(dhdp))
2826         {
2827                 ret = dhd_set_suspend(val, dhdp);
2828         }
2829
2830         DHD_PERIM_UNLOCK(dhdp);
2831         DHD_OS_WAKE_UNLOCK(dhdp);
2832         return ret;
2833 }
2834
2835 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2836 static void dhd_early_suspend(struct early_suspend *h)
2837 {
2838         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2839         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2840
2841         if (dhd)
2842                 dhd_suspend_resume_helper(dhd, 1, 0);
2843 }
2844
2845 static void dhd_late_resume(struct early_suspend *h)
2846 {
2847         struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2848         DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2849
2850         if (dhd)
2851                 dhd_suspend_resume_helper(dhd, 0, 0);
2852 }
2853 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
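
/* Registration sketch (assumption; the actual hookup is done elsewhere in
 * this file): the two handlers above are bound to the early_suspend
 * descriptor embedded in dhd_info and registered with the Android
 * early-suspend framework, e.g.:
 *
 *      dhd->early_suspend.suspend = dhd_early_suspend;
 *      dhd->early_suspend.resume  = dhd_late_resume;
 *      register_early_suspend(&dhd->early_suspend);
 */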
2854
2855 /*
2856  * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
2857  * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
2858  *
2859  *      dhd_timeout_start(&tmo, usec);
2860  *      while (!dhd_timeout_expired(&tmo))
2861  *              if (poll_something())
2862  *                      break;
2863  *      if (dhd_timeout_expired(&tmo))
2864  *              fatal();
2865  */
2866
2867 void
2868 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2869 {
2870         tmo->limit = usec;
2871         tmo->increment = 0;
2872         tmo->elapsed = 0;
2873         tmo->tick = jiffies_to_usecs(1);
2874 }
2875
2876 int
2877 dhd_timeout_expired(dhd_timeout_t *tmo)
2878 {
2879         /* Does nothing on the first call */
2880         if (tmo->increment == 0) {
2881                 tmo->increment = 1;
2882                 return 0;
2883         }
2884
2885         if (tmo->elapsed >= tmo->limit)
2886                 return 1;
2887
2888         /* Add the delay that's about to take place */
2889         tmo->elapsed += tmo->increment;
2890
2891         if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2892                 OSL_DELAY(tmo->increment);
2893                 tmo->increment *= 2;
2894                 if (tmo->increment > tmo->tick)
2895                         tmo->increment = tmo->tick;
2896         } else {
2897                 wait_queue_head_t delay_wait;
2898                 DECLARE_WAITQUEUE(wait, current);
2899                 init_waitqueue_head(&delay_wait);
2900                 add_wait_queue(&delay_wait, &wait);
2901                 set_current_state(TASK_INTERRUPTIBLE);
2902                 (void)schedule_timeout(1);
2903                 remove_wait_queue(&delay_wait, &wait);
2904                 set_current_state(TASK_RUNNING);
2905         }
2906
2907         return 0;
2908 }
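
/* Back-off behaviour, worked through (assuming HZ == 100, so tmo->tick ==
 * jiffies_to_usecs(1) == 10000 usec): successive calls spin-delay for 1, 2,
 * 4, ... usec, doubling each time with the increment capped at one tick;
 * once the increment reaches a full tick and the caller may sleep, each
 * further call yields one jiffy via schedule_timeout(1). tmo->elapsed
 * accumulates the requested delays, so dhd_timeout_expired returns 1 once
 * the total reaches tmo->limit.
 */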
2909
2910 int
2911 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2912 {
2913         int i = 0;
2914
2915         if (!dhd) {
2916                 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2917                 return DHD_BAD_IF;
2918         }
2919
2920         while (i < DHD_MAX_IFS) {
2921                 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2922                         return i;
2923                 i++;
2924         }
2925
2926         return DHD_BAD_IF;
2927 }
2928
2929 struct net_device * dhd_idx2net(void *pub, int ifidx)
2930 {
2931         struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2932         struct dhd_info *dhd_info;
2933
2934         if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2935                 return NULL;
2936         dhd_info = dhd_pub->info;
2937         if (dhd_info && dhd_info->iflist[ifidx])
2938                 return dhd_info->iflist[ifidx]->net;
2939         return NULL;
2940 }
2941
2942 int
2943 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2944 {
2945         int i = DHD_MAX_IFS;
2946
2947         ASSERT(dhd);
2948
2949         if (name == NULL || *name == '\0')
2950                 return 0;
2951
2952         while (--i > 0)
2953                 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2954                         break;
2955
2956         DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2957
2958         return i;       /* default - the primary interface */
2959 }
2960
2961 char *
2962 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2963 {
2964         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2965
2966         ASSERT(dhd);
2967
2968         if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2969                 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2970                 return "<if_bad>";
2971         }
2972
2973         if (dhd->iflist[ifidx] == NULL) {
2974                 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2975                 return "<if_null>";
2976         }
2977
2978         if (dhd->iflist[ifidx]->net)
2979                 return dhd->iflist[ifidx]->net->name;
2980
2981         return "<if_none>";
2982 }
2983
2984 uint8 *
2985 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2986 {
2987         int i;
2988         dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2989
2990         ASSERT(dhd);
2991         for (i = 0; i < DHD_MAX_IFS; i++)
2992                 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2993                         return dhd->iflist[i]->mac_addr;
2994
2995         return NULL;
2996 }
2997
2998
2999 static void
3000 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3001 {
3002         struct net_device *dev;
3003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3004         struct netdev_hw_addr *ha;
3005 #else
3006         struct dev_mc_list *mclist;
3007 #endif
3008         uint32 allmulti, cnt;
3009
3010         wl_ioctl_t ioc;
3011         char *buf, *bufp;
3012         uint buflen;
3013         int ret;
3014
3015         if (!dhd->iflist[ifidx]) {
3016                 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3017                 return;
3018         }
3019         dev = dhd->iflist[ifidx]->net;
3020         if (!dev)
3021                 return;
3022 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3023         netif_addr_lock_bh(dev);
3024 #endif /* LINUX >= 2.6.27 */
3025 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3026         cnt = netdev_mc_count(dev);
3027 #else
3028         cnt = dev->mc_count;
3029 #endif /* LINUX >= 2.6.35 */
3030 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3031         netif_addr_unlock_bh(dev);
3032 #endif /* LINUX >= 2.6.27 */
3033
3034         /* Determine initial value of allmulti flag */
3035         allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3036
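             /* With PASS_ALL_MCAST_PKTS, allmulti is forced on (when packet
              * filtering is compiled in, only while not in early suspend).
              */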
3037 #ifdef PASS_ALL_MCAST_PKTS
3038 #ifdef PKT_FILTER_SUPPORT
3039         if (!dhd->pub.early_suspended)
3040 #endif /* PKT_FILTER_SUPPORT */
3041                 allmulti = TRUE;
3042 #endif /* PASS_ALL_MCAST_PKTS */
3043
3044         /* Send down the multicast list first. */
3045
3046
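             /* iovar buffer layout: "mcast_list\0" + uint32 count (LE) + count * 6-byte addresses */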
3047         buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3048         if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3049                 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3050                         dhd_ifname(&dhd->pub, ifidx), cnt));
3051                 return;
3052         }
3053
3054         strncpy(bufp, "mcast_list", buflen - 1);
3055         bufp[buflen - 1] = '\0';
3056         bufp += strlen("mcast_list") + 1;
3057
3058         cnt = htol32(cnt);
3059         memcpy(bufp, &cnt, sizeof(cnt));
3060         bufp += sizeof(cnt);
3061
3062 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3063         netif_addr_lock_bh(dev);
3064 #endif /* LINUX >= 2.6.27 */
3065 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3066         netdev_for_each_mc_addr(ha, dev) {
3067                 if (!cnt)
3068                         break;
3069                 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3070                 bufp += ETHER_ADDR_LEN;
3071                 cnt--;
3072         }
3073 #else /* LINUX < 2.6.35 */
3074         for (mclist = dev->mc_list; (mclist && (cnt > 0));
3075                         cnt--, mclist = mclist->next) {
3076                 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3077                 bufp += ETHER_ADDR_LEN;
3078         }
3079 #endif /* LINUX >= 2.6.35 */
3080 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3081         netif_addr_unlock_bh(dev);
3082 #endif /* LINUX >= 2.6.27 */
3083
3084         memset(&ioc, 0, sizeof(ioc));
3085         ioc.cmd = WLC_SET_VAR;
3086         ioc.buf = buf;
3087         ioc.len = buflen;
3088         ioc.set = TRUE;
3089
3090         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3091         if (ret < 0) {
3092                 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3093                         dhd_ifname(&dhd->pub, ifidx), cnt));
3094                 allmulti = cnt ? TRUE : allmulti;
3095         }
3096
3097         MFREE(dhd->pub.osh, buf, buflen);
3098
3099         /* Now send the allmulti setting.  This is based on the setting in the
3100          * net_device flags, but may have been forced on above if the dongle
3101          * rejected the multicast list we tried to set.
3102          */
3103
3104         buflen = sizeof("allmulti") + sizeof(allmulti);
3105         if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
3106                 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
3107                 return;
3108         }
3109         allmulti = htol32(allmulti);
3110
3111         if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
3112                 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
3113                            dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
3114                 MFREE(dhd->pub.osh, buf, buflen);
3115                 return;
3116         }
3117
3118
3119         memset(&ioc, 0, sizeof(ioc));
3120         ioc.cmd = WLC_SET_VAR;
3121         ioc.buf = buf;
3122         ioc.len = buflen;
3123         ioc.set = TRUE;
3124
3125         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3126         if (ret < 0) {
3127                 DHD_ERROR(("%s: set allmulti %d failed\n",
3128                            dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3129         }
3130
3131         MFREE(dhd->pub.osh, buf, buflen);
3132
3133         /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3134
3135         allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3136
3137         allmulti = htol32(allmulti);
3138
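             /* WLC_SET_PROMISC takes a raw little-endian uint32, so no iovar framing is needed */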
3139         memset(&ioc, 0, sizeof(ioc));
3140         ioc.cmd = WLC_SET_PROMISC;
3141         ioc.buf = &allmulti;
3142         ioc.len = sizeof(allmulti);
3143         ioc.set = TRUE;
3144
3145         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3146         if (ret < 0) {
3147                 DHD_ERROR(("%s: set promisc %d failed\n",
3148                            dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3149         }
3150 }
3151
3152 int
3153 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3154 {
3155         char buf[32];
3156         wl_ioctl_t ioc;
3157         int ret;
3158
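             /* iovar buffer layout: "cur_etheraddr\0" + 6-byte MAC address */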
3159         if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3160                 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3161                 return -1;
3162         }
3163         memset(&ioc, 0, sizeof(ioc));
3164         ioc.cmd = WLC_SET_VAR;
3165         ioc.buf = buf;
3166         ioc.len = 32;
3167         ioc.set = TRUE;
3168
3169         ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3170         if (ret < 0) {
3171                 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3172         } else {
3173                 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3174                 if (ifidx == 0)
3175                         memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3176         }
3177
3178         return ret;
3179 }
3180
3181 #ifdef SOFTAP
3182 extern struct net_device *ap_net_dev;
3183 extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
3184 #endif
3185
3186 #ifdef DHD_PSTA
3187 /* Get the psta/psr configuration */
3188 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3189 {
3190         dhd_info_t *dhd = dhdp->info;
3191         return (int)dhd->psta_mode;
3192 }
3193 /* Set the psta/psr configuration */
3194 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3195 {
3196         dhd_info_t *dhd = dhdp->info;
3197         dhd->psta_mode = val;
3198         return 0;
3199 }
3200 #endif /* DHD_PSTA */
3201
3202 static void
3203 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3204 {
3205         dhd_info_t *dhd = handle;
3206         dhd_if_event_t *if_event = event_info;
3207         struct net_device *ndev;
3208         int ifidx, bssidx;
3209         int ret;
3210 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3211         struct wireless_dev *vwdev, *primary_wdev;
3212         struct net_device *primary_ndev;
3213 #endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3214
3215         if (event != DHD_WQ_WORK_IF_ADD) {
3216                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3217                 return;
3218         }
3219
3220         if (!dhd) {
3221                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3222                 return;
3223         }
3224
3225         if (!if_event) {
3226                 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3227                 return;
3228         }
3229
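             /* Scheduled via the DHD deferred work queue, so this runs in
              * process context; hold the net-if lock, a wake lock and the
              * perimeter lock for the duration.
              */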
3230         dhd_net_if_lock_local(dhd);
3231         DHD_OS_WAKE_LOCK(&dhd->pub);
3232         DHD_PERIM_LOCK(&dhd->pub);
3233
3234         ifidx = if_event->event.ifidx;
3235         bssidx = if_event->event.bssidx;
3236         DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3237
3238         /* Non-Android path: the interface name on the host matches the name in
3239          * the event message, and that name is used to create the dongle
3240          * interface list on the host. */
3241         ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3242                 if_event->mac, bssidx, TRUE, if_event->name);
3243         if (!ndev) {
3244                 DHD_ERROR(("%s: net device alloc failed\n", __FUNCTION__));
3245                 goto done;
3246         }
3247
3248 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3249         vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
3250         if (unlikely(!vwdev)) {
3251                 DHD_ERROR(("Could not allocate wireless device\n"));
3252                 goto done;
3253         }
3254         primary_ndev = dhd->pub.info->iflist[0]->net;
3255         primary_wdev = ndev_to_wdev(primary_ndev);
3256         vwdev->wiphy = primary_wdev->wiphy;
3257         vwdev->iftype = if_event->event.role;
3258         vwdev->netdev = ndev;
3259         ndev->ieee80211_ptr = vwdev;
3260         SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
3261         DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
3262 #endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3263
3264         DHD_PERIM_UNLOCK(&dhd->pub);
3265         ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3266         DHD_PERIM_LOCK(&dhd->pub);
3267         if (ret != BCME_OK) {
3268                 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3269                 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3270                 goto done;
3271         }
3272 #ifdef PCIE_FULL_DONGLE
3273         /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3274         if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3275                 char iovbuf[WLC_IOCTL_SMLEN];
3276                 uint32 var_int = 1;
3277
3278                 memset(iovbuf, 0, sizeof(iovbuf));
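                     /* bcm_mkiovar() packs "ap_isolate\0" + the 32-bit value into iovbuf */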
3279                 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
3280                 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
3281
3282                 if (ret != BCME_OK) {
3283                         DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3284                         dhd_remove_if(&dhd->pub, ifidx, TRUE);
3285                 }
3286         }
3287 #endif /* PCIE_FULL_DONGLE */
3288
3289 done:
3290         MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3291
3292         DHD_PERIM_UNLOCK(&dhd->pub);
3293         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3294         dhd_net_if_unlock_local(dhd);
3295 }
3296
3297 static void
3298 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3299 {
3300         dhd_info_t *dhd = handle;
3301         int ifidx;
3302         dhd_if_event_t *if_event = event_info;
3303
3304
3305         if (event != DHD_WQ_WORK_IF_DEL) {
3306                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3307                 return;
3308         }
3309
3310         if (!dhd) {
3311                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3312                 return;
3313         }
3314
3315         if (!if_event) {
3316                 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3317                 return;
3318         }
3319
3320         dhd_net_if_lock_local(dhd);
3321         DHD_OS_WAKE_LOCK(&dhd->pub);
3322         DHD_PERIM_LOCK(&dhd->pub);
3323
3324         ifidx = if_event->event.ifidx;
3325         DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3326
3327         DHD_PERIM_UNLOCK(&dhd->pub);
3328         dhd_remove_if(&dhd->pub, ifidx, TRUE);
3329         DHD_PERIM_LOCK(&dhd->pub);
3330
3331         MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3332
3333         DHD_PERIM_UNLOCK(&dhd->pub);
3334         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3335         dhd_net_if_unlock_local(dhd);
3336 }
3337
3338 static void
3339 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3340 {
3341         dhd_info_t *dhd = handle;
3342         dhd_if_t *ifp = event_info;
3343
3344         if (event != DHD_WQ_WORK_SET_MAC) {
3345                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
                     return;
3346         }
3347
3348         if (!dhd) {
3349                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3350                 return;
3351         }
3352
3353         dhd_net_if_lock_local(dhd);
3354         DHD_OS_WAKE_LOCK(&dhd->pub);
3355         DHD_PERIM_LOCK(&dhd->pub);
3356
3357 #ifdef SOFTAP
3358         {
3359                 unsigned long flags;
3360                 bool in_ap = FALSE;
3361                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3362                 in_ap = (ap_net_dev != NULL);
3363                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3364
3365                 if (in_ap)  {
3366                         DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3367                                    ifp->net->name));
3368                         goto done;
3369                 }
3370         }
3371 #endif /* SOFTAP */
3372
3373         if (ifp == NULL || !dhd->pub.up) {
3374                 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3375                 goto done;
3376         }
3377
3379         ifp->set_macaddress = FALSE;
3380         if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3381                 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3382         else
3383                 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3384
3385 done:
3386         DHD_PERIM_UNLOCK(&dhd->pub);
3387         DHD_OS_WAKE_UNLOCK(&dhd->pub);
3388         dhd_net_if_unlock_local(dhd);
3389 }
3390
3391 static void
3392 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3393 {
3394         dhd_info_t *dhd = handle;
3395         dhd_if_t *ifp = event_info;
3396         int ifidx;
3397
3398         if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3399                 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3400                 return;
3401         }
3402
3403         if (!dhd) {
3404                 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3405                 return;
3406         }
3407
3408         dhd_net_if_lock_local(dhd);
3409         DHD_OS_WAKE_LOCK(&dhd->pub);
3410         DHD_PERIM_LOCK(&dhd->pub);
3411
3412 #ifdef SOFTAP
3413         {
3414                 bool in_ap = FALSE;
3415                 unsigned long flags;
3416                 DHD_GENERAL_LOCK(&dhd->pub, flags);
3417                 in_ap = (ap_net_dev != NULL);
3418                 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3419
3420                 if (in_ap)  {
3421                         DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3422                                    ifp->net->name));
3423                         ifp->set_multicast = FALSE;
3424                         goto done;
3425                 }
3426         }