2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2016, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
26 * <<Broadcom-WL-IPTag/Open:>>
28 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/inetdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/etherdevice.h>
48 #include <linux/random.h>
49 #include <linux/spinlock.h>
50 #include <linux/ethtool.h>
51 #include <linux/fcntl.h>
54 #include <linux/reboot.h>
55 #include <linux/notifier.h>
56 #include <net/addrconf.h>
57 #ifdef ENABLE_ADAPTIVE_SCHED
58 #include <linux/cpufreq.h>
59 #endif /* ENABLE_ADAPTIVE_SCHED */
61 #include <asm/uaccess.h>
62 #include <asm/unaligned.h>
66 #include <bcmendian.h>
69 #include <proto/ethernet.h>
70 #include <proto/bcmevent.h>
71 #include <proto/vlan.h>
72 #include <proto/802.3.h>
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
88 #ifdef CONFIG_HAS_WAKELOCK
89 #include <linux/wakelock.h>
92 #include <wl_cfg80211.h>
102 #include <linux/compat.h>
106 #include <dhd_wmf_linux.h>
110 #include <proto/bcmicmp.h>
111 #include <bcm_l2_filter.h>
112 #include <dhd_l2_filter.h>
113 #endif /* DHD_L2_FILTER */
116 #include <dhd_psta.h>
117 #endif /* DHD_PSTA */
120 #ifdef DHDTCPACK_SUPPRESS
122 #endif /* DHDTCPACK_SUPPRESS */
124 #ifdef DHD_DEBUG_PAGEALLOC
125 typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
126 void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
127 extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
128 #endif /* DHD_DEBUG_PAGEALLOC */
132 /* Dynamic CPU selection for load balancing */
133 #include <linux/cpu.h>
134 #include <linux/cpumask.h>
135 #include <linux/notifier.h>
136 #include <linux/workqueue.h>
137 #include <asm/atomic.h>
139 #if !defined(DHD_LB_PRIMARY_CPUS)
140 #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
143 #if !defined(DHD_LB_SECONDARY_CPUS)
144 #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
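/* Each set bit in these masks names a CPU core id: e.g. the default
 * secondary mask 0xFE (binary 11111110) covers cores 1-7 and excludes
 * core 0, while the default primary mask 0x0 selects no cores.
 * dhd_cpumasks_init() below walks these masks with isset() and sets the
 * corresponding bits in cpumask_primary/cpumask_secondary.
 */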
147 #define HIST_BIN_SIZE 8
149 #if defined(DHD_LB_RXP)
150 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
151 #endif /* DHD_LB_RXP */
156 #include <linux/time.h>
159 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
160 #define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
161 #define TSMAX 1000 /* max no. of timing records kept */
164 static uint32 tsidx = 0;
165 static uint32 htsf_seqnum = 0;
167 struct timeval tsync;
168 static uint32 tsport = 5010;
170 typedef struct histo_ {
174 #if !ISPOWEROF2(DHD_SDALIGN)
175 #error DHD_SDALIGN is not a power of 2!
178 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
179 #endif /* WLMEDIA_HTSF */
184 #endif /* quote_str */
187 #endif /* to_str */
189 #define quote_str(s) to_str(s)
191 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
192 #endif /* STBLINUX */
196 extern bool ap_cfg_running;
197 extern bool ap_fw_loaded;
199 extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
201 #ifdef FIX_CPU_MIN_CLOCK
202 #include <linux/pm_qos.h>
203 #endif /* FIX_CPU_MIN_CLOCK */
204 #ifdef SET_RANDOM_MAC_SOFTAP
205 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
206 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
208 static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
209 #endif /* SET_RANDOM_MAC_SOFTAP */
210 #ifdef ENABLE_ADAPTIVE_SCHED
211 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
212 #ifndef CUSTOM_CPUFREQ_THRESH
213 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
214 #endif /* CUSTOM_CPUFREQ_THRESH */
215 #endif /* ENABLE_ADAPTIVE_SCHED */
217 /* enable HOSTIP cache update from the host side when an eth0:N is up */
218 #define AOE_IP_ALIAS_SUPPORT 1
222 #include <bcm_rpc_tp.h>
225 #include <wlfc_proto.h>
226 #include <dhd_wlfc.h>
229 #include <wl_android.h>
231 /* Maximum STA per radio */
232 #define DHD_MAX_STA 32
236 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
237 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
238 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
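/* Illustrative expansion of the macro above: for 802.1D priority 6,
 * prio2fifo[6] == 3 and wme_fifo2ac[3] == 3, so WME_PRIO2AC(6) == 3
 * (conventionally the voice access category).
 */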
240 #ifdef ARP_OFFLOAD_SUPPORT
241 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
242 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
243 unsigned long event, void *ptr);
244 static struct notifier_block dhd_inetaddr_notifier = {
245 .notifier_call = dhd_inetaddr_notifier_call
247 /* Make sure we do not register the same notifier twice, otherwise a loop is likely to be
248 * created in the kernel notifier linked list (with 'next' pointing to itself)
250 static bool dhd_inetaddr_notifier_registered = FALSE;
251 #endif /* ARP_OFFLOAD_SUPPORT */
253 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
254 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
255 unsigned long event, void *ptr);
256 static struct notifier_block dhd_inet6addr_notifier = {
257 .notifier_call = dhd_inet6addr_notifier_call
259 /* Make sure we do not register the same notifier twice, otherwise a loop is likely to be
260 * created in the kernel notifier linked list (with 'next' pointing to itself)
262 static bool dhd_inet6addr_notifier_registered = FALSE;
263 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
266 #include <linux/suspend.h>
267 volatile bool dhd_mmc_suspend = FALSE;
268 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
269 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
271 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
272 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
275 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
277 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
278 MODULE_LICENSE("GPL and additional rights");
279 #endif /* LinuxVer */
284 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
286 #ifndef PROP_TXSTATUS
287 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
289 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
291 #endif /* BCM_FD_AGGR */
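/* Illustrative sizing (non-aggregation case): for a netdev with a 1500-byte
 * MTU and a 14-byte Ethernet header, the rx buffer size works out to
 * 1514 bytes plus the dongle protocol header length (pub.hdrlen), with an
 * extra 128 bytes of headroom when PROP_TXSTATUS signalling is compiled in.
 */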
294 extern bool dhd_wlfc_skip_fc(void);
295 extern void dhd_wlfc_plat_init(void *dhd);
296 extern void dhd_wlfc_plat_deinit(void *dhd);
297 #endif /* PROP_TXSTATUS */
298 #ifdef USE_DYNAMIC_F2_BLKSIZE
299 extern uint sd_f2_blocksize;
300 extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
301 #endif /* USE_DYNAMIC_F2_BLKSIZE */
303 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
309 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
311 /* Linux wireless extension support */
312 #if defined(WL_WIRELESS_EXT)
314 extern wl_iw_extra_params_t g_wl_iw_params;
315 #endif /* defined(WL_WIRELESS_EXT) */
317 #ifdef CONFIG_PARTIALSUSPEND_SLP
318 #include <linux/partialsuspend_slp.h>
319 #define CONFIG_HAS_EARLYSUSPEND
320 #define DHD_USE_EARLYSUSPEND
321 #define register_early_suspend register_pre_suspend
322 #define unregister_early_suspend unregister_pre_suspend
323 #define early_suspend pre_suspend
324 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
326 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
327 #include <linux/earlysuspend.h>
328 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
329 #endif /* CONFIG_PARTIALSUSPEND_SLP */
331 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
333 #ifdef PKT_FILTER_SUPPORT
334 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
335 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
336 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
341 extern int dhd_read_macaddr(struct dhd_info *dhd);
343 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
346 extern int dhd_write_macaddr(struct ether_addr *mac);
348 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
355 #ifdef DHD_FW_COREDUMP
356 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
357 #endif /* DHD_FW_COREDUMP */
359 static void dhd_log_dump_init(dhd_pub_t *dhd);
360 static void dhd_log_dump_deinit(dhd_pub_t *dhd);
361 static void dhd_log_dump(void *handle, void *event_info, u8 event);
362 void dhd_schedule_log_dump(dhd_pub_t *dhdp);
363 static int do_dhd_log_dump(dhd_pub_t *dhdp);
364 #endif /* DHD_LOG_DUMP */
366 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
367 static struct notifier_block dhd_reboot_notifier = {
368 .notifier_call = dhd_reboot_callback,
373 static int is_reboot = 0;
376 typedef struct dhd_if_event {
377 struct list_head list;
378 wl_event_data_if_t event;
379 char name[IFNAMSIZ+1];
380 uint8 mac[ETHER_ADDR_LEN];
383 /* Interface control information */
384 typedef struct dhd_if {
385 struct dhd_info *info; /* back pointer to dhd_info */
386 /* OS/stack specifics */
387 struct net_device *net;
388 int idx; /* iface idx in dongle */
389 uint subunit; /* subunit */
390 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
393 uint8 bssidx; /* bsscfg index for the interface */
394 bool attached; /* Delayed attachment when unset */
395 bool txflowcontrol; /* Per interface flow control indicator */
396 char name[IFNAMSIZ+1]; /* linux interface name */
397 char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
398 struct net_device_stats stats;
400 dhd_wmf_t wmf; /* per bsscfg wmf setting */
402 #ifdef PCIE_FULL_DONGLE
403 struct list_head sta_list; /* sll of associated stations */
404 #if !defined(BCM_GMAC3)
405 spinlock_t sta_list_lock; /* lock for manipulating sll */
406 #endif /* ! BCM_GMAC3 */
407 #endif /* PCIE_FULL_DONGLE */
408 uint32 ap_isolate; /* ap-isolation settings */
413 arp_table_t *phnd_arp_table;
414 /* for Per BSS modification */
418 #endif /* DHD_L2_FILTER */
431 uint32 coef; /* scaling factor */
432 uint32 coefdec1; /* first decimal */
433 uint32 coefdec2; /* second decimal */
443 static tstamp_t ts[TSMAX];
444 static tstamp_t maxdelayts;
445 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
447 #endif /* WLMEDIA_HTSF */
449 struct ipv6_work_info_t {
456 typedef struct dhd_dump {
460 #endif /* DHD_DEBUG */
462 /* When Perimeter locks are deployed, any blocking calls must be preceded
463 * with a PERIM UNLOCK and followed by a PERIM LOCK.
464 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
465 * wait_event_timeout().
468 /* Local private structure (extension of pub) */
469 typedef struct dhd_info {
470 #if defined(WL_WIRELESS_EXT)
471 wl_iw_t iw; /* wireless extensions state (must be first) */
472 #endif /* defined(WL_WIRELESS_EXT) */
474 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
476 void *adapter; /* adapter information, interrupt, fw path etc. */
477 char fw_path[PATH_MAX]; /* path to firmware image */
478 char nv_path[PATH_MAX]; /* path to nvram vars file */
479 char conf_path[PATH_MAX]; /* path to config vars file */
481 /* serialize dhd iovars */
482 struct mutex dhd_iovar_mutex;
484 struct semaphore proto_sem;
486 spinlock_t wlfc_spinlock;
488 #endif /* PROP_TXSTATUS */
492 wait_queue_head_t ioctl_resp_wait;
493 wait_queue_head_t d3ack_wait;
494 wait_queue_head_t dhd_bus_busy_state_wait;
495 uint32 default_wd_interval;
497 struct timer_list timer;
499 #ifdef DHD_PCIE_RUNTIMEPM
500 struct timer_list rpm_timer;
501 bool rpm_timer_valid;
502 tsk_ctl_t thr_rpm_ctl;
503 #endif /* DHD_PCIE_RUNTIMEPM */
504 struct tasklet_struct tasklet;
509 struct semaphore sdsem;
510 tsk_ctl_t thr_dpc_ctl;
511 tsk_ctl_t thr_wdt_ctl;
513 tsk_ctl_t thr_rxf_ctl;
515 bool rxthread_enabled;
518 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
519 struct wake_lock wl_wifi; /* Wifi wakelock */
520 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
521 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
522 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
523 struct wake_lock wl_evtwake; /* Wifi event wakelock */
524 #ifdef BCMPCIE_OOB_HOST_WAKE
525 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
526 #endif /* BCMPCIE_OOB_HOST_WAKE */
527 #ifdef DHD_USE_SCAN_WAKELOCK
528 struct wake_lock wl_scanwake; /* Wifi scan wakelock */
529 #endif /* DHD_USE_SCAN_WAKELOCK */
530 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
532 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
533 /* net_device interface lock, prevents race conditions between net_dev interface
534 * calls and wifi_on or wifi_off
536 struct mutex dhd_net_if_mutex;
537 struct mutex dhd_suspend_mutex;
539 spinlock_t wakelock_spinlock;
540 spinlock_t wakelock_evt_spinlock;
541 uint32 wakelock_event_counter;
542 uint32 wakelock_counter;
543 int wakelock_wd_counter;
544 int wakelock_rx_timeout_enable;
545 int wakelock_ctrl_timeout_enable;
547 uint32 wakelock_before_waive;
549 /* Thread to issue ioctl for multicast */
550 wait_queue_head_t ctrl_wait;
551 atomic_t pend_8021x_cnt;
552 dhd_attach_states_t dhd_state;
554 dhd_event_log_t event_data;
555 #endif /* SHOW_LOGTRACE */
557 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
558 struct early_suspend early_suspend;
559 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
561 #ifdef ARP_OFFLOAD_SUPPORT
563 #endif /* ARP_OFFLOAD_SUPPORT */
567 struct timer_list rpcth_timer;
568 bool rpcth_timer_active;
571 #ifdef DHDTCPACK_SUPPRESS
572 spinlock_t tcpack_lock;
573 #endif /* DHDTCPACK_SUPPRESS */
574 #ifdef FIX_CPU_MIN_CLOCK
575 bool cpufreq_fix_status;
576 struct mutex cpufreq_fix;
577 struct pm_qos_request dhd_cpu_qos;
578 #ifdef FIX_BUS_MIN_CLOCK
579 struct pm_qos_request dhd_bus_qos;
580 #endif /* FIX_BUS_MIN_CLOCK */
581 #endif /* FIX_CPU_MIN_CLOCK */
582 void *dhd_deferred_wq;
583 #ifdef DEBUG_CPU_FREQ
584 struct notifier_block freq_trans;
585 int __percpu *new_freq;
588 struct notifier_block pm_notifier;
590 uint32 psta_mode; /* PSTA or PSR */
591 #endif /* DHD_PSTA */
594 struct timer_list join_timer;
595 u32 join_timeout_val;
596 bool join_timer_active;
597 uint scan_time_count;
598 struct timer_list scan_timer;
599 bool scan_timer_active;
602 /* CPU Load Balance dynamic CPU selection */
604 /* Variable that tracks the current CPUs available for candidacy */
605 cpumask_var_t cpumask_curr_avail;
607 /* Primary and secondary CPU mask */
608 cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
609 cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
611 struct notifier_block cpu_notifier;
613 /* Tasklet to handle Tx Completion packet freeing */
614 struct tasklet_struct tx_compl_tasklet;
615 atomic_t tx_compl_cpu;
618 /* Tasklet to handle RxBuf Post during Rx completion */
619 struct tasklet_struct rx_compl_tasklet;
620 atomic_t rx_compl_cpu;
622 /* Napi struct for handling rx packet sendup. Packets are removed from
623 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
624 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
625 * to run on rx_napi_cpu.
627 struct sk_buff_head rx_pend_queue ____cacheline_aligned;
628 struct sk_buff_head rx_napi_queue ____cacheline_aligned;
629 struct napi_struct rx_napi_struct ____cacheline_aligned;
630 atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
631 struct net_device *rx_napi_netdev; /* netdev of primary interface */
633 struct work_struct rx_napi_dispatcher_work;
634 struct work_struct tx_compl_dispatcher_work;
635 struct work_struct rx_compl_dispatcher_work;
636 /* Number of times DPC Tasklet ran */
639 /* Number of times NAPI processing got scheduled */
640 uint32 napi_sched_cnt;
642 /* Number of times NAPI processing ran on each available core */
643 uint32 napi_percpu_run_cnt[NR_CPUS];
645 /* Number of times RX Completions got scheduled */
646 uint32 rxc_sched_cnt;
647 /* Number of times RX Completion ran on each available core */
648 uint32 rxc_percpu_run_cnt[NR_CPUS];
650 /* Number of times TX Completions got scheduled */
651 uint32 txc_sched_cnt;
652 /* Number of times TX Completions ran on each available core */
653 uint32 txc_percpu_run_cnt[NR_CPUS];
656 /* Number of times each CPU came online */
657 uint32 cpu_online_cnt[NR_CPUS];
659 /* Number of times each CPU went offline */
660 uint32 cpu_offline_cnt[NR_CPUS];
663 * Consumer Histogram - NAPI RX Packet processing
664 * -----------------------------------------------
665 * On each CPU, when the NAPI RX packet processing callback was invoked,
666 * how many packets were processed is captured in this data structure.
667 * It is difficult to capture the "exact" number of packets processed.
668 * So, treating the packet counter as a 32 bit value, we keep a
669 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
670 * processed is rounded up to the next power of 2 and put in the
671 * appropriate "bin"; the value in that bin gets incremented.
672 * For example, assume that on CPU 1 NAPI Rx runs 3 times
673 * and the packet counts processed are as follows (assume the bin counters start at 0):
674 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
675 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
676 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
678 uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
679 uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
680 uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
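	/* Illustrative binning: a NAPI run that processes 10 packets is rounded up
	 * to 16 (2^4) by dhd_lb_stats_update_histo() and increments
	 * napi_rx_hist[cpu][3], since bin index 0 corresponds to 2^1.
	 */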
683 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
684 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
686 struct kobject dhd_kobj;
687 #ifdef SUPPORT_SENSORHUB
689 #endif /* SUPPORT_SENSORHUB */
691 struct delayed_work dhd_memdump_work;
694 #define DHDIF_FWDER(dhdif) FALSE
696 /* Flag to indicate if we should download firmware on driver load */
697 uint dhd_download_fw_on_driverload = TRUE;
699 /* Flag to indicate if driver is initialized */
700 uint dhd_driver_init_done = FALSE;
702 /* Definitions to provide the paths to the firmware and nvram
703 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
705 char firmware_path[MOD_PARAM_PATHLEN];
706 char nvram_path[MOD_PARAM_PATHLEN];
707 char config_path[MOD_PARAM_PATHLEN];
709 /* backup buffer for firmware and nvram path */
710 char fw_bak_path[MOD_PARAM_PATHLEN];
711 char nv_bak_path[MOD_PARAM_PATHLEN];
713 /* information string to keep firmware, chip, chip revision version info visible in the log */
714 char info_string[MOD_PARAM_INFOLEN];
715 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
717 int disable_proptx = 0;
718 module_param(op_mode, int, 0644);
720 #if defined(DHD_LB_RXP)
721 static int dhd_napi_weight = 32;
722 module_param(dhd_napi_weight, int, 0644);
723 #endif /* DHD_LB_RXP */
725 extern int wl_control_wl_start(struct net_device *dev);
726 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
727 struct semaphore dhd_registration_sem;
728 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
730 /* deferred handlers */
731 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
732 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
733 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
734 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
735 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
736 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
737 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
739 extern void dhd_netdev_free(struct net_device *ndev);
740 #endif /* WL_CFG80211 */
743 module_param(dhd_msg_level, int, 0);
744 #if defined(WL_WIRELESS_EXT)
745 module_param(iw_msg_level, int, 0);
748 module_param(wl_dbg_level, int, 0);
750 module_param(android_msg_level, int, 0);
751 module_param(config_msg_level, int, 0);
753 #ifdef ARP_OFFLOAD_SUPPORT
754 /* ARP offload enable */
755 uint dhd_arp_enable = TRUE;
756 module_param(dhd_arp_enable, uint, 0);
758 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
760 #ifdef ENABLE_ARP_SNOOP_MODE
761 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
763 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
764 #endif /* ENABLE_ARP_SNOOP_MODE */
766 module_param(dhd_arp_mode, uint, 0);
767 #endif /* ARP_OFFLOAD_SUPPORT */
769 /* Disable Prop tx */
770 module_param(disable_proptx, int, 0644);
771 /* load firmware and/or nvram values from the filesystem */
772 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
773 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
774 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
776 /* Watchdog interval */
778 /* extend watchdog expiration to 2 seconds when DPC is running */
779 #define WATCHDOG_EXTEND_INTERVAL (2000)
781 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
782 module_param(dhd_watchdog_ms, uint, 0);
784 #ifdef DHD_PCIE_RUNTIMEPM
785 uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
786 #endif /* DHD_PCIE_RUNTIMEPM */
787 #if defined(DHD_DEBUG)
788 /* Console poll interval */
789 uint dhd_console_ms = 0;
790 module_param(dhd_console_ms, uint, 0644);
791 #endif /* defined(DHD_DEBUG) */
794 uint dhd_slpauto = TRUE;
795 module_param(dhd_slpauto, uint, 0);
797 #ifdef PKT_FILTER_SUPPORT
798 /* Global Pkt filter enable control */
799 uint dhd_pkt_filter_enable = TRUE;
800 module_param(dhd_pkt_filter_enable, uint, 0);
803 /* Pkt filter init setup */
804 uint dhd_pkt_filter_init = 0;
805 module_param(dhd_pkt_filter_init, uint, 0);
807 /* Pkt filter mode control */
808 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
809 uint dhd_master_mode = FALSE;
811 uint dhd_master_mode = FALSE;
812 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
813 module_param(dhd_master_mode, uint, 0);
815 int dhd_watchdog_prio = 0;
816 module_param(dhd_watchdog_prio, int, 0);
818 /* DPC thread priority */
819 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
820 module_param(dhd_dpc_prio, int, 0);
822 /* RX frame thread priority */
823 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
824 module_param(dhd_rxf_prio, int, 0);
826 int passive_channel_skip = 0;
827 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
829 #if !defined(BCMDHDUSB)
830 extern int dhd_dongle_ramsize;
831 module_param(dhd_dongle_ramsize, int, 0);
832 #endif /* BCMDHDUSB */
834 /* Keep track of number of instances */
835 static int dhd_found = 0;
836 static int instance_base = 0; /* Starting instance number */
837 module_param(instance_base, int, 0644);
839 /* Functions to manage sysfs interface for dhd */
840 static int dhd_sysfs_init(dhd_info_t *dhd);
841 static void dhd_sysfs_exit(dhd_info_t *dhd);
846 dhd_lb_set_default_cpus(dhd_info_t *dhd)
848 /* Default CPU allocation for the jobs */
849 atomic_set(&dhd->rx_napi_cpu, 1);
850 atomic_set(&dhd->rx_compl_cpu, 2);
851 atomic_set(&dhd->tx_compl_cpu, 2);
855 dhd_cpumasks_deinit(dhd_info_t *dhd)
857 free_cpumask_var(dhd->cpumask_curr_avail);
858 free_cpumask_var(dhd->cpumask_primary);
859 free_cpumask_var(dhd->cpumask_primary_new);
860 free_cpumask_var(dhd->cpumask_secondary);
861 free_cpumask_var(dhd->cpumask_secondary_new);
865 dhd_cpumasks_init(dhd_info_t *dhd)
871 if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
872 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
873 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
874 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
875 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
876 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
881 cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
882 cpumask_clear(dhd->cpumask_primary);
883 cpumask_clear(dhd->cpumask_secondary);
885 cpus = DHD_LB_PRIMARY_CPUS;
886 for (id = 0; id < NR_CPUS; id++) {
887 if (isset(&cpus, id))
888 cpumask_set_cpu(id, dhd->cpumask_primary);
891 cpus = DHD_LB_SECONDARY_CPUS;
892 for (id = 0; id < NR_CPUS; id++) {
893 if (isset(&cpus, id))
894 cpumask_set_cpu(id, dhd->cpumask_secondary);
899 dhd_cpumasks_deinit(dhd);
904 * The CPU Candidacy Algorithm
905 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
906 * The available CPUs for selection are divided into two groups
907 * Primary Set - A CPU mask that carries the First Choice CPUs
908 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
910 * There are two types of jobs that need to be assigned to
911 * the CPUs, from one of the above mentioned CPU groups. The jobs are
912 * 1) Rx Packet Processing - napi_cpu
913 * 2) Completion Processing (Tx, Rx) - compl_cpu
915 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
916 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
917 * algo tries to pick up the first available non-boot CPU (i.e. not CPU0) for napi_cpu.
918 * If there are more processors free, it assigns one to compl_cpu.
919 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
920 * CPU, as much as possible.
922 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
923 * would allow Tx completion skbs to be released into a local free pool from
924 * which the rx buffer posts could have been serviced. It is important to note
925 * that a Tx packet may not have a large enough buffer for rx posting.
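 * Example (illustrative, using the default masks defined above): with
 * DHD_LB_PRIMARY_CPUS 0x0 and DHD_LB_SECONDARY_CPUS 0xFE, and CPUs 0-3
 * online, the primary set is empty, so napi_cpu is taken from the
 * secondary set (CPU 1) and compl_cpu from the next secondary CPU (CPU 2).
 * If only CPU0 is online, both napi_cpu and compl_cpu fall back to CPU0.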
927 void dhd_select_cpu_candidacy(dhd_info_t *dhd)
929 uint32 primary_available_cpus; /* count of primary available cpus */
930 uint32 secondary_available_cpus; /* count of secondary available cpus */
931 uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
932 uint32 compl_cpu = 0; /* cpu selected for completion jobs */
934 cpumask_clear(dhd->cpumask_primary_new);
935 cpumask_clear(dhd->cpumask_secondary_new);
938 * Now select from the primary mask. Even if a job is
939 * already running on a CPU in the secondary group, we still move
940 * it to a primary CPU, so no conditional checks are needed.
942 cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
943 dhd->cpumask_curr_avail);
945 cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
946 dhd->cpumask_curr_avail);
948 primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
950 if (primary_available_cpus > 0) {
951 napi_cpu = cpumask_first(dhd->cpumask_primary_new);
953 /* If no further CPU is available,
954 * cpumask_next returns >= nr_cpu_ids
956 compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
957 if (compl_cpu >= nr_cpu_ids)
961 DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
962 __FUNCTION__, napi_cpu, compl_cpu));
964 /* -- Now check for the CPUs from the secondary mask -- */
965 secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
967 DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
968 __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
970 if (secondary_available_cpus > 0) {
971 /* At this point, if napi_cpu is unassigned it means that no CPU
972 * from the primary group is online
975 napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
976 compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
977 } else if (compl_cpu == 0) {
978 compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
981 /* If no CPU was available for completion, choose CPU 0 */
982 if (compl_cpu >= nr_cpu_ids)
985 if ((primary_available_cpus == 0) &&
986 (secondary_available_cpus == 0)) {
987 /* No CPUs available from primary or secondary mask */
992 DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
993 __FUNCTION__, napi_cpu, compl_cpu));
994 ASSERT(napi_cpu < nr_cpu_ids);
995 ASSERT(compl_cpu < nr_cpu_ids);
997 atomic_set(&dhd->rx_napi_cpu, napi_cpu);
998 atomic_set(&dhd->tx_compl_cpu, compl_cpu);
999 atomic_set(&dhd->rx_compl_cpu, compl_cpu);
1004 * Function to handle CPU Hotplug notifications.
1005 * One of the tasks it performs is to trigger the CPU Candidacy algorithm
1006 * for load balancing.
1009 dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1011 unsigned int cpu = (unsigned int)(long)hcpu;
1013 dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1018 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1019 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1020 dhd_select_cpu_candidacy(dhd);
1023 case CPU_DOWN_PREPARE:
1024 case CPU_DOWN_PREPARE_FROZEN:
1025 DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1026 cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1027 dhd_select_cpu_candidacy(dhd);
1036 #if defined(DHD_LB_STATS)
1037 void dhd_lb_stats_init(dhd_pub_t *dhdp)
1043 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1050 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1054 DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1055 DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1056 DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1057 DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1059 for (i = 0; i < NR_CPUS; i++) {
1060 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1061 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1062 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1064 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1065 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1068 for (i = 0; i < NR_CPUS; i++) {
1069 for (j = 0; j < HIST_BIN_SIZE; j++) {
1070 DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1071 DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1072 DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1079 static void dhd_lb_stats_dump_histo(
1080 struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1083 uint32 per_cpu_total[NR_CPUS] = {0};
1086 bcm_bprintf(strbuf, "CPU: \t\t");
1087 for (i = 0; i < num_possible_cpus(); i++)
1088 bcm_bprintf(strbuf, "%d\t", i);
1089 bcm_bprintf(strbuf, "\nBin\n");
1091 for (i = 0; i < HIST_BIN_SIZE; i++) {
1092 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1093 for (j = 0; j < num_possible_cpus(); j++) {
1094 bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1096 bcm_bprintf(strbuf, "\n");
1098 bcm_bprintf(strbuf, "Per CPU Total \t");
1100 for (i = 0; i < num_possible_cpus(); i++) {
1101 for (j = 0; j < HIST_BIN_SIZE; j++) {
1102 per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1104 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1105 total += per_cpu_total[i];
1107 bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1112 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1116 bcm_bprintf(strbuf, "CPU: \t");
1117 for (i = 0; i < num_possible_cpus(); i++)
1118 bcm_bprintf(strbuf, "%d\t", i);
1119 bcm_bprintf(strbuf, "\n");
1121 bcm_bprintf(strbuf, "Val: \t");
1122 for (i = 0; i < num_possible_cpus(); i++)
1123 bcm_bprintf(strbuf, "%u\t", *(p+i));
1124 bcm_bprintf(strbuf, "\n");
1128 void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
1132 if (dhdp == NULL || strbuf == NULL) {
1133 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1134 __FUNCTION__, dhdp, strbuf));
1140 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1144 bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
1145 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
1147 bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
1148 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
1150 bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1151 dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
1152 dhd->txc_sched_cnt);
1154 bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
1155 dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
1156 bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
1157 dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
1158 #endif /* DHD_LB_RXP */
1161 bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
1162 dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
1163 bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
1164 dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
1165 #endif /* DHD_LB_RXC */
1169 bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
1170 dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
1171 bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
1172 dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
1173 #endif /* DHD_LB_TXC */
1176 static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1181 bin_power = next_larger_power2(count);
1183 switch (bin_power) {
1185 case 1: /* Fall through intentionally */
1186 case 2: p = bin + 0; break;
1187 case 4: p = bin + 1; break;
1188 case 8: p = bin + 2; break;
1189 case 16: p = bin + 3; break;
1190 case 32: p = bin + 4; break;
1191 case 64: p = bin + 5; break;
1192 case 128: p = bin + 6; break;
1193 default : p = bin + 7; break;
1200 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1203 dhd_info_t *dhd = dhdp->info;
1207 dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1212 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1215 dhd_info_t *dhd = dhdp->info;
1219 dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1224 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1227 dhd_info_t *dhd = dhdp->info;
1231 dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1236 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
1238 dhd_info_t *dhd = dhdp->info;
1239 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
1242 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
1244 dhd_info_t *dhd = dhdp->info;
1245 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
1248 #endif /* DHD_LB_STATS */
1252 #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
1253 int g_frameburst = 1;
1254 #endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
1256 static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
1258 /* DHD Perimeter lock, only used in routers with bypass forwarding. */
1259 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
1260 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
1261 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1263 #ifdef PCIE_FULL_DONGLE
1264 #if defined(BCM_GMAC3)
1265 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
1266 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1267 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1269 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1270 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
1271 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
1272 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1274 #else /* ! BCM_GMAC3 */
1275 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1276 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1277 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1278 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1279 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1281 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1282 static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
1283 struct list_head *snapshot_list);
1284 static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
1285 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1286 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1287 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1289 #endif /* ! BCM_GMAC3 */
1290 #endif /* PCIE_FULL_DONGLE */
1292 /* Control fw roaming */
1293 uint dhd_roam_disable = 0;
1296 extern int dhd_dbg_init(dhd_pub_t *dhdp);
1297 extern void dhd_dbg_remove(void);
1300 /* Control radio state */
1301 uint dhd_radio_up = 1;
1303 /* Network interface name */
1304 char iface_name[IFNAMSIZ] = {'\0'};
1305 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
1307 /* The following are specific to the SDIO dongle */
1309 /* IOCTL response timeout */
1310 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
1312 /* Idle timeout for backplane clock */
1313 int dhd_idletime = DHD_IDLETIME_TICKS;
1314 module_param(dhd_idletime, int, 0);
1317 uint dhd_poll = FALSE;
1318 module_param(dhd_poll, uint, 0);
1320 /* Use interrupts */
1321 uint dhd_intr = TRUE;
1322 module_param(dhd_intr, uint, 0);
1324 /* SDIO Drive Strength (in milliamps) */
1325 uint dhd_sdiod_drive_strength = 6;
1326 module_param(dhd_sdiod_drive_strength, uint, 0);
1330 extern uint dhd_txbound;
1331 extern uint dhd_rxbound;
1332 module_param(dhd_txbound, uint, 0);
1333 module_param(dhd_rxbound, uint, 0);
1335 /* Deferred transmits */
1336 extern uint dhd_deferred_tx;
1337 module_param(dhd_deferred_tx, uint, 0);
1339 #endif /* BCMSDIO */
1343 /* Echo packet generator (pkts/s) */
1344 uint dhd_pktgen = 0;
1345 module_param(dhd_pktgen, uint, 0);
1347 /* Echo packet len (0 => sawtooth, max 2040) */
1348 uint dhd_pktgen_len = 0;
1349 module_param(dhd_pktgen_len, uint, 0);
1354 /* Allow delayed firmware download for debug purpose */
1355 int allow_delay_fwdl = FALSE;
1356 module_param(allow_delay_fwdl, int, 0);
1358 extern char dhd_version[];
1359 extern char fw_version[];
1361 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
1362 static void dhd_net_if_lock_local(dhd_info_t *dhd);
1363 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
1364 static void dhd_suspend_lock(dhd_pub_t *dhdp);
1365 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
1368 void htsf_update(dhd_info_t *dhd, void *data);
1369 tsf_t prev_tsf, cur_tsf;
1371 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
1372 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
1373 static void dhd_dump_latency(void);
1374 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
1375 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
1376 static void dhd_dump_htsfhisto(histo_t *his, char *s);
1377 #endif /* WLMEDIA_HTSF */
1379 /* Monitor interface */
1380 int dhd_monitor_init(void *dhd_pub);
1381 int dhd_monitor_uninit(void);
1384 #if defined(WL_WIRELESS_EXT)
1385 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
1386 #endif /* defined(WL_WIRELESS_EXT) */
1388 static void dhd_dpc(ulong data);
1390 extern int dhd_wait_pend8021x(struct net_device *dev);
1391 void dhd_os_wd_timer_extend(void *bus, bool extend);
1395 #error TOE requires BDC
1397 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
1398 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
1401 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
1402 wl_event_msg_t *event_ptr, void **data_ptr);
1404 #if defined(CONFIG_PM_SLEEP)
1405 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
1407 int ret = NOTIFY_DONE;
1408 bool suspend = FALSE;
1409 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
1411 BCM_REFERENCE(dhdinfo);
1414 case PM_HIBERNATION_PREPARE:
1415 case PM_SUSPEND_PREPARE:
1419 case PM_POST_HIBERNATION:
1420 case PM_POST_SUSPEND:
1425 #if defined(SUPPORT_P2P_GO_PS)
1426 #ifdef PROP_TXSTATUS
1428 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
1429 dhd_wlfc_suspend(&dhdinfo->pub);
1430 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
1432 dhd_wlfc_resume(&dhdinfo->pub);
1433 #endif /* PROP_TXSTATUS */
1434 #endif /* defined(SUPPORT_P2P_GO_PS) */
1436 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1437 KERNEL_VERSION(2, 6, 39))
1438 dhd_mmc_suspend = suspend;
1445 /* Make sure we do not register the same notifier twice, otherwise a loop is likely to be
1446 * created in the kernel notifier linked list (with 'next' pointing to itself)
1448 static bool dhd_pm_notifier_registered = FALSE;
1450 extern int register_pm_notifier(struct notifier_block *nb);
1451 extern int unregister_pm_notifier(struct notifier_block *nb);
1452 #endif /* CONFIG_PM_SLEEP */
1454 /* Request scheduling of the bus rx frame */
1455 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
1456 static void dhd_os_rxflock(dhd_pub_t *pub);
1457 static void dhd_os_rxfunlock(dhd_pub_t *pub);
1459 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1460 typedef struct dhd_dev_priv {
1461 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
1462 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
1463 int ifidx; /* interface index */
1466 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
1467 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
1468 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1469 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1470 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
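/* Illustrative usage from netdev callbacks, given the macros above:
 *   dhd_info_t *dhd = DHD_DEV_INFO(dev);
 *   dhd_if_t *ifp = DHD_DEV_IFP(dev);
 *   int ifidx = DHD_DEV_IFIDX(dev);
 */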
1472 /** Clear the dhd net_device's private structure. */
1474 dhd_dev_priv_clear(struct net_device * dev)
1476 dhd_dev_priv_t * dev_priv;
1477 ASSERT(dev != (struct net_device *)NULL);
1478 dev_priv = DHD_DEV_PRIV(dev);
1479 dev_priv->dhd = (dhd_info_t *)NULL;
1480 dev_priv->ifp = (dhd_if_t *)NULL;
1481 dev_priv->ifidx = DHD_BAD_IF;
1484 /** Setup the dhd net_device's private structure. */
1486 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1489 dhd_dev_priv_t * dev_priv;
1490 ASSERT(dev != (struct net_device *)NULL);
1491 dev_priv = DHD_DEV_PRIV(dev);
1492 dev_priv->dhd = dhd;
1493 dev_priv->ifp = ifp;
1494 dev_priv->ifidx = ifidx;
1497 #ifdef PCIE_FULL_DONGLE
1499 /** Dummy objects are defined with state representing bad|down.
1500 * Performance gains come from reducing branch conditionals, improving instruction
1501 * parallelism and dual issue, reducing load shadows, and making larger pipelines available.
1502 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1503 * is accessed via the dhd_sta_t.
1506 /* Dummy dhd_info object */
1507 dhd_info_t dhd_info_null = {
1508 #if defined(BCM_GMAC3)
1512 .info = &dhd_info_null,
1513 #ifdef DHDTCPACK_SUPPRESS
1514 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1515 #endif /* DHDTCPACK_SUPPRESS */
1517 .busstate = DHD_BUS_DOWN
1520 #define DHD_INFO_NULL (&dhd_info_null)
1521 #define DHD_PUB_NULL (&dhd_info_null.pub)
1523 /* Dummy netdevice object */
1524 struct net_device dhd_net_dev_null = {
1525 .reg_state = NETREG_UNREGISTERED
1527 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1529 /* Dummy dhd_if object */
1530 dhd_if_t dhd_if_null = {
1531 #if defined(BCM_GMAC3)
1535 .wmf = { .wmf_enable = TRUE },
1537 .info = DHD_INFO_NULL,
1538 .net = DHD_NET_DEV_NULL,
1541 #define DHD_IF_NULL (&dhd_if_null)
1543 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
1545 /** Interface STA list management. */
1547 /** Fetch the dhd_if object, given the interface index in the dhd. */
1548 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1550 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1551 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1552 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1554 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1555 static void dhd_if_del_sta_list(dhd_if_t * ifp);
1556 static void dhd_if_flush_sta(dhd_if_t * ifp);
1558 /* Construct/Destruct a sta pool. */
1559 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1560 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1561 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1562 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1565 /* Return interface pointer */
1566 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1568 ASSERT(ifidx < DHD_MAX_IFS);
1570 if (ifidx >= DHD_MAX_IFS)
1573 return dhdp->info->iflist[ifidx];
1576 /** Reset a dhd_sta object and free into the dhd pool. */
1578 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1582 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1584 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1587 * Flush and free all packets in every flow ring queue belonging to the sta.
1588 * Packets in the flow ring itself will be flushed later.
1590 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1591 uint16 flowid = sta->flowid[prio];
1593 if (flowid != FLOWID_INVALID) {
1594 unsigned long flags;
1595 flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
1596 flow_ring_node_t * flow_ring_node;
1598 #ifdef DHDTCPACK_SUPPRESS
1599 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
1600 * when a new packet arrives from the network stack.
1602 dhd_tcpack_info_tbl_clean(dhdp);
1603 #endif /* DHDTCPACK_SUPPRESS */
1605 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1606 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1607 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1609 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1611 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
1612 PKTFREE(dhdp->osh, pkt, TRUE);
1616 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1617 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1620 sta->flowid[prio] = FLOWID_INVALID;
1623 id16_map_free(dhdp->staid_allocator, sta->idx);
1624 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1625 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1626 sta->ifidx = DHD_BAD_IF;
1627 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1628 INIT_LIST_HEAD(&sta->list);
1629 sta->idx = ID16_INVALID; /* implying free */
1632 /** Allocate a dhd_sta object from the dhd pool. */
1634 dhd_sta_alloc(dhd_pub_t * dhdp)
1638 dhd_sta_pool_t * sta_pool;
1640 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1642 idx = id16_map_alloc(dhdp->staid_allocator);
1643 if (idx == ID16_INVALID) {
1644 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1645 return DHD_STA_NULL;
1648 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1649 sta = &sta_pool[idx];
1651 ASSERT((sta->idx == ID16_INVALID) &&
1652 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1654 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1656 sta->idx = idx; /* implying allocated */
1661 /** Delete all STAs in an interface's STA list. */
1663 dhd_if_del_sta_list(dhd_if_t *ifp)
1665 dhd_sta_t *sta, *next;
1666 unsigned long flags;
1668 DHD_IF_STA_LIST_LOCK(ifp, flags);
1670 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1671 #if defined(BCM_GMAC3)
1673 /* Remove sta from WOFA forwarder. */
1674 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1676 #endif /* BCM_GMAC3 */
1677 list_del(&sta->list);
1678 dhd_sta_free(&ifp->info->pub, sta);
1681 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1686 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1688 dhd_if_flush_sta(dhd_if_t * ifp)
1690 #if defined(BCM_GMAC3)
1692 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1693 dhd_sta_t *sta, *next;
1694 unsigned long flags;
1696 DHD_IF_STA_LIST_LOCK(ifp, flags);
1698 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1699 /* Remove any sta entry from WOFA forwarder. */
1700 fwder_flush(ifp->fwdh, (wofa_t)sta);
1703 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1705 #endif /* BCM_GMAC3 */
1708 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1710 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1712 int idx, prio, sta_pool_memsz;
1714 dhd_sta_pool_t * sta_pool;
1715 void * staid_allocator;
1717 ASSERT(dhdp != (dhd_pub_t *)NULL);
1718 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1720 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1721 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1722 if (staid_allocator == NULL) {
1723 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1727 /* Pre-allocate a pool of dhd_sta objects (one extra). */
1728 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1729 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1730 if (sta_pool == NULL) {
1731 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1732 id16_map_fini(dhdp->osh, staid_allocator);
1736 dhdp->sta_pool = sta_pool;
1737 dhdp->staid_allocator = staid_allocator;
1739 /* Initialize all sta(s) for the pre-allocated free pool. */
1740 bzero((uchar *)sta_pool, sta_pool_memsz);
1741 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1742 sta = &sta_pool[idx];
1743 sta->idx = id16_map_alloc(staid_allocator);
1744 ASSERT(sta->idx <= max_sta);
1746 /* Now place them into the pre-allocated free pool. */
1747 for (idx = 1; idx <= max_sta; idx++) {
1748 sta = &sta_pool[idx];
1749 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1750 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1752 dhd_sta_free(dhdp, sta);
1758 /** Destruct the pool of dhd_sta_t objects.
1759 * Caller must ensure that no STA objects are currently associated with an if.
1762 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1764 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1768 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1769 for (idx = 1; idx <= max_sta; idx++) {
1770 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1771 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1773 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1774 dhdp->sta_pool = NULL;
1777 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1778 dhdp->staid_allocator = NULL;
1781 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1783 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1785 int idx, prio, sta_pool_memsz;
1787 dhd_sta_pool_t * sta_pool;
1788 void *staid_allocator;
1791 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1795 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1796 staid_allocator = dhdp->staid_allocator;
1799 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1803 if (!staid_allocator) {
1804 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1808 /* clear free pool */
1809 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1810 bzero((uchar *)sta_pool, sta_pool_memsz);
1812 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1813 id16_map_clear(staid_allocator, max_sta, 1);
1815 /* Initialize all sta(s) for the pre-allocated free pool. */
1816 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1817 sta = &sta_pool[idx];
1818 sta->idx = id16_map_alloc(staid_allocator);
1819 ASSERT(sta->idx <= max_sta);
1821 /* Now place them into the pre-allocated free pool. */
1822 for (idx = 1; idx <= max_sta; idx++) {
1823 sta = &sta_pool[idx];
1824 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1825 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1827 dhd_sta_free(dhdp, sta);
1831 /** Find STA with MAC address ea in an interface's STA list. */
1833 dhd_find_sta(void *pub, int ifidx, void *ea)
1837 unsigned long flags;
1840 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1842 return DHD_STA_NULL;
1844 DHD_IF_STA_LIST_LOCK(ifp, flags);
1846 list_for_each_entry(sta, &ifp->sta_list, list) {
1847 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1848 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1853 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1855 return DHD_STA_NULL;
1858 /** Add STA into the interface's STA list. */
1860 dhd_add_sta(void *pub, int ifidx, void *ea)
1864 unsigned long flags;
1867 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1869 return DHD_STA_NULL;
1871 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1872 if (sta == DHD_STA_NULL) {
1873 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1874 return DHD_STA_NULL;
1877 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1879 /* link the sta and the dhd interface */
1882 INIT_LIST_HEAD(&sta->list);
1884 DHD_IF_STA_LIST_LOCK(ifp, flags);
1886 list_add_tail(&sta->list, &ifp->sta_list);
1888 #if defined(BCM_GMAC3)
1890 ASSERT(ISALIGNED(ea, 2));
1891 /* Add sta to WOFA forwarder. */
1892 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1894 #endif /* BCM_GMAC3 */
1896 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1901 /** Delete STA from the interface's STA list. */
1903 dhd_del_sta(void *pub, int ifidx, void *ea)
1905 dhd_sta_t *sta, *next;
1907 unsigned long flags;
1910 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1914 DHD_IF_STA_LIST_LOCK(ifp, flags);
1916 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1917 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1918 #if defined(BCM_GMAC3)
1919 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1920 ASSERT(ISALIGNED(ea, 2));
1921 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1923 #endif /* BCM_GMAC3 */
1924 list_del(&sta->list);
1925 dhd_sta_free(&ifp->info->pub, sta);
1929 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1930 #ifdef DHD_L2_FILTER
1931 if (ifp->parp_enable) {
1932 /* clear Proxy ARP cache of specific Ethernet Address */
1933 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1934 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1936 #endif /* DHD_L2_FILTER */
1940 /** Add STA if it doesn't exist. Not reentrant. */
1942 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1946 sta = dhd_find_sta(pub, ifidx, ea);
1950 sta = dhd_add_sta(pub, ifidx, ea);
1956 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1957 #if !defined(BCM_GMAC3)
1958 static struct list_head *
1959 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1961 unsigned long flags;
1962 dhd_sta_t *sta, *snapshot;
1964 INIT_LIST_HEAD(snapshot_list);
1966 DHD_IF_STA_LIST_LOCK(ifp, flags);
1968 list_for_each_entry(sta, &ifp->sta_list, list) {
1969 /* allocate one and add to snapshot */
1970 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1971 if (snapshot == NULL) {
1972 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1976 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1978 INIT_LIST_HEAD(&snapshot->list);
1979 list_add_tail(&snapshot->list, snapshot_list);
1982 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1984 return snapshot_list;
1988 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1990 dhd_sta_t *sta, *next;
1992 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1993 list_del(&sta->list);
1994 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1997 #endif /* !BCM_GMAC3 */
1998 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2001 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
2002 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2003 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2004 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2005 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2006 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2007 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2008 #endif /* PCIE_FULL_DONGLE */
2013 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2015 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2016 * CPU and schedules a tasklet.
2017 * @tasklet: opaque pointer to the tasklet
2020 dhd_tasklet_schedule(void *tasklet)
2022 tasklet_schedule((struct tasklet_struct *)tasklet);
2026 * dhd_tasklet_schedule_on - Schedules the passed tasklet on a given CPU
2027 * @tasklet: tasklet to be scheduled
2028 * @on_cpu: cpu core id
2030 * If the requested cpu is online, then an IPI is sent to this cpu via
2031 * smp_call_function_single() with no wait, and tasklet_schedule()
2032 * is invoked to schedule the specified tasklet on the requested CPU.
2035 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2038 smp_call_function_single(on_cpu,
2039 dhd_tasklet_schedule, (void *)tasklet, wait);
2041 #endif /* DHD_LB_TXC || DHD_LB_RXC */
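/*
 * Minimal sketch, not part of the driver, of the technique dhd_tasklet_schedule_on()
 * uses above: make tasklet_schedule() run on a chosen CPU by sending that CPU an
 * asynchronous IPI through smp_call_function_single(). demo_tasklet and its
 * handler are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/smp.h>

static void demo_tasklet_fn(unsigned long data)
{
	/* deferred work; runs in softirq context on the CPU that scheduled it */
}

static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

static void demo_schedule_tasklet(void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
}

static void demo_schedule_tasklet_on(int cpu)
{
	/* wait == 0: fire-and-forget IPI, do not block for the remote call */
	if (smp_call_function_single(cpu, demo_schedule_tasklet, &demo_tasklet, 0))
		tasklet_schedule(&demo_tasklet); /* CPU offline: fall back to local CPU */
}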
2044 #if defined(DHD_LB_TXC)
2046 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2047 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2048 * freeing the packets placed in the tx_compl workq
2051 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2053 dhd_info_t *dhd = dhdp->info;
2054 int curr_cpu, on_cpu;
2056 if (dhd->rx_napi_netdev == NULL) {
2057 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2061 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2063 * If the destination CPU is NOT online or is the same as the current CPU,
2064 * there is no need to schedule the work
2066 curr_cpu = get_cpu();
2069 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2071 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2072 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2074 schedule_work(&dhd->tx_compl_dispatcher_work);
2078 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2080 struct dhd_info *dhd =
2081 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2085 cpu = atomic_read(&dhd->tx_compl_cpu);
2086 if (!cpu_online(cpu))
2087 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2089 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2093 #endif /* DHD_LB_TXC */
2096 #if defined(DHD_LB_RXC)
2098 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2099 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2100 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2101 * placed in the rx_compl workq.
2103 * @dhdp: pointer to dhd_pub object
2106 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2108 dhd_info_t *dhd = dhdp->info;
2109 int curr_cpu, on_cpu;
2111 if (dhd->rx_napi_netdev == NULL) {
2112 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2116 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2118 * If the destination CPU is NOT online or is the same as the current CPU,
2119 * there is no need to schedule the work
2121 curr_cpu = get_cpu();
2124 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2126 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2127 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2129 schedule_work(&dhd->rx_compl_dispatcher_work);
2133 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2135 struct dhd_info *dhd =
2136 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2140 cpu = atomic_read(&dhd->rx_compl_cpu);
2141 if (!cpu_online(cpu))
2142 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2144 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2148 #endif /* DHD_LB_RXC */
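/*
 * Sketch only, not part of the driver: both completion dispatchers above make the
 * same decision. Read the preferred CPU, schedule the tasklet directly when that
 * CPU is the current one (or offline), and otherwise defer to a work item that is
 * allowed to make the cross-CPU call. The names below are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void dispatch_completion(atomic_t *preferred_cpu,
                                struct tasklet_struct *tl,
                                struct work_struct *dispatcher)
{
	int curr_cpu = get_cpu();	/* get_cpu() also disables preemption */
	int on_cpu = atomic_read(preferred_cpu);

	put_cpu();

	if ((on_cpu == curr_cpu) || !cpu_online(on_cpu))
		tasklet_schedule(tl);		/* cheap local schedule */
	else
		schedule_work(dispatcher);	/* dispatcher work IPIs the remote CPU */
}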
2151 #if defined(DHD_LB_RXP)
2153 * dhd_napi_poll - Load balance napi poll function to process received
2154 * packets and send up the network stack using netif_receive_skb()
2156 * @napi: napi object in which context this poll function is invoked
2157 * @budget: number of packets to be processed.
2159 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2160 * rx_napi_queue into a local rx_process_queue (lock, splice the queue, unlock).
2161 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2162 * packet tag and sendup.
2165 dhd_napi_poll(struct napi_struct *napi, int budget)
2168 const int pkt_count = 1;
2170 struct sk_buff * skb;
2171 unsigned long flags;
2172 struct dhd_info *dhd;
2174 struct sk_buff_head rx_process_queue;
2176 dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2177 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2178 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2180 __skb_queue_head_init(&rx_process_queue);
2182 /* extract the entire rx_napi_queue into local rx_process_queue */
2183 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2184 skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2185 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2187 while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2188 OSL_PREFETCH(skb->data);
2190 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2192 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2193 __FUNCTION__, skb, ifid));
2195 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2199 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2201 DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2202 napi_complete(napi);
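/*
 * Illustrative sketch, not part of the driver: dhd_napi_poll() drains its queue
 * with the standard splice pattern seen above. The producer queue's lock is held
 * only long enough to move every pending skb onto a private list; the private
 * list is then processed with no lock held. drain_pending() and its queue are
 * hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_pending(struct sk_buff_head *pending)
{
	struct sk_buff_head local;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&local);

	/* hold the producer lock only for the (cheap) splice */
	spin_lock_irqsave(&pending->lock, flags);
	skb_queue_splice_tail_init(pending, &local);
	spin_unlock_irqrestore(&pending->lock, flags);

	/* process lock-free; producers may keep enqueueing meanwhile */
	while ((skb = __skb_dequeue(&local)) != NULL)
		consume_skb(skb);	/* the driver instead sends the skb up the stack */
}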
2208 * dhd_napi_schedule - Place the napi struct into the current CPU's softnet napi
2209 * poll list. This function may be invoked via the smp_call_function_single
2210 * from a remote CPU.
2212 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2213 * after the napi_struct is added to the softnet data's poll_list
2215 * @info: pointer to a dhd_info struct
2218 dhd_napi_schedule(void *info)
2220 dhd_info_t *dhd = (dhd_info_t *)info;
2222 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2223 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2225 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2226 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2227 __napi_schedule(&dhd->rx_napi_struct);
2228 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2232 * If the rx_napi_struct was already running, then we let it complete
2233 * processing all its packets. The rx_napi_struct may only run on one
2234 * core at a time, to avoid out-of-order handling.
2239 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2240 * action after placing the dhd's rx_process napi object in the remote CPU's
2241 * softnet data's poll_list.
2243 * @dhd: dhd_info which has the rx_process napi object
2244 * @on_cpu: desired remote CPU id
2247 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2249 int wait = 0; /* asynchronous IPI */
2251 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2252 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2254 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2255 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2256 __FUNCTION__, on_cpu));
2259 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2265 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
2266 * Why is this needed?
2267 * The candidacy algorithm runs from the callback function registered
2268 * with the CPU hotplug notifier, and that callback runs in worker
2269 * context. dhd_napi_schedule_on also runs in worker context.
2270 * Both of these can therefore run on two different CPUs at the same time,
2271 * so there is a window where a given CPUn is being brought down from CPUm
2272 * while we are trying to run a function on CPUn.
2273 * To prevent this, it is better to execute the whole SMP function call
2274 * under get_online_cpus.
2275 * That call ensures that the hotplug mechanism does not kick in
2276 * until we are done dealing with the online CPUs.
2277 * If the hotplug worker is already running there is no problem, because the
2278 * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
2280 * The code structure below follows the pattern proposed in
2281 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2283 * Q: I need to ensure that a particular cpu is not removed while some
2284 * work specific to that cpu is in progress.
2286 * According to the documentation, calling get_online_cpus is NOT required
2287 * if we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn
2288 * can run from work queue context, we have to call these functions.
2290 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2292 struct dhd_info *dhd =
2293 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2297 cpu = atomic_read(&dhd->rx_napi_cpu);
2298 if (!cpu_online(cpu))
2299 dhd_napi_schedule(dhd);
2301 dhd_napi_schedule_on(dhd, cpu);
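/*
 * Sketch only, not part of the driver: the comment above asks for
 * get_online_cpus()/put_online_cpus() around the cross-CPU call, so the shape of
 * dhd_rx_napi_dispatcher_fn() is roughly the following. target_cpu, kick_cpu()
 * and kick_local() are hypothetical stand-ins for dhd->rx_napi_cpu,
 * dhd_napi_schedule_on() and dhd_napi_schedule().
 */
#include <linux/cpu.h>

static void dispatch_with_hotplug_held(int target_cpu, void (*kick_cpu)(int),
                                       void (*kick_local)(void))
{
	get_online_cpus();		/* hold off CPU hotplug while we decide */
	if (!cpu_online(target_cpu))
		kick_local();		/* target went away: run on this CPU */
	else
		kick_cpu(target_cpu);	/* safe: target cannot be unplugged now */
	put_online_cpus();
}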
2306 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2307 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2308 * the packets enqueued into the rx_napi_queue and sendup.
2309 * The producer's rx packet queue is appended to the rx_napi_queue before
2310 * dispatching the rx_napi_struct.
2313 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2315 unsigned long flags;
2316 dhd_info_t *dhd = dhdp->info;
2320 if (dhd->rx_napi_netdev == NULL) {
2321 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2325 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2326 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2328 /* append the producer's queue of packets to the napi's rx process queue */
2329 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2330 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2331 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2334 * If the destination CPU is NOT online or is the same as the current CPU,
2335 * there is no need to schedule the work
2337 curr_cpu = get_cpu();
2340 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2342 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2343 dhd_napi_schedule(dhd);
2345 schedule_work(&dhd->rx_napi_dispatcher_work);
2350 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2353 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2355 dhd_info_t *dhd = dhdp->info;
2357 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2358 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2359 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2360 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2362 #endif /* DHD_LB_RXP */
2366 static void dhd_memdump_work_handler(struct work_struct * work)
2368 struct dhd_info *dhd =
2369 container_of(work, struct dhd_info, dhd_memdump_work.work);
2373 dhd_prot_collect_memdump(&dhd->pub);
2378 /** Returns the dhd iflist index corresponding to the bssidx provided by apps */
2379 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2382 dhd_info_t *dhd = dhdp->info;
2385 ASSERT(bssidx < DHD_MAX_IFS);
2388 for (i = 0; i < DHD_MAX_IFS; i++) {
2389 ifp = dhd->iflist[i];
2390 if (ifp && (ifp->bssidx == bssidx)) {
2391 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2392 ifp->name, bssidx, i));
2399 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2405 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2409 dhd_os_rxflock(dhdp);
2410 store_idx = dhdp->store_idx;
2411 sent_idx = dhdp->sent_idx;
2412 if (dhdp->skbbuf[store_idx] != NULL) {
2413 /* Make sure the previous packets are processed */
2414 dhd_os_rxfunlock(dhdp);
2415 #ifdef RXF_DEQUEUE_ON_BUSY
2416 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2417 skb, store_idx, sent_idx));
2419 #else /* RXF_DEQUEUE_ON_BUSY */
2420 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2421 skb, store_idx, sent_idx));
2422 /* removed msleep here, should use wait_event_timeout if we
2423 * want to give rx frame thread a chance to run
2425 #if defined(WAIT_DEQUEUE)
2429 #endif /* RXF_DEQUEUE_ON_BUSY */
2431 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2432 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2433 dhdp->skbbuf[store_idx] = skb;
2434 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2435 dhd_os_rxfunlock(dhdp);
2440 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2446 dhd_os_rxflock(dhdp);
2448 store_idx = dhdp->store_idx;
2449 sent_idx = dhdp->sent_idx;
2450 skb = dhdp->skbbuf[sent_idx];
2453 dhd_os_rxfunlock(dhdp);
2454 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2455 store_idx, sent_idx));
2459 dhdp->skbbuf[sent_idx] = NULL;
2460 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2462 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2465 dhd_os_rxfunlock(dhdp);
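/*
 * Illustrative sketch, not part of the driver: the rxf queue above is a classic
 * power-of-two ring. store_idx and sent_idx advance with "& (MAXSKBPEND - 1)",
 * and a slot that is still non-NULL at store_idx means the ring is full. A
 * stand-alone model of the same index arithmetic (assumes the ring starts
 * zero-initialized):
 */
#include <stddef.h>

#define RING_SIZE 32		/* must be a power of two, like MAXSKBPEND */

struct ring {
	void *slot[RING_SIZE];
	unsigned int store_idx;	/* next slot the producer writes */
	unsigned int sent_idx;	/* next slot the consumer reads  */
};

static int ring_put(struct ring *r, void *p)
{
	if (r->slot[r->store_idx] != NULL)
		return -1;	/* full: the consumer has not drained this slot yet */
	r->slot[r->store_idx] = p;
	r->store_idx = (r->store_idx + 1) & (RING_SIZE - 1);
	return 0;
}

static void *ring_get(struct ring *r)
{
	void *p = r->slot[r->sent_idx];

	if (p == NULL)
		return NULL;	/* empty */
	r->slot[r->sent_idx] = NULL;
	r->sent_idx = (r->sent_idx + 1) & (RING_SIZE - 1);
	return p;
}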
2470 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2472 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2474 if (prepost) { /* pre process */
2475 dhd_read_macaddr(dhd);
2476 } else { /* post process */
2477 dhd_write_macaddr(&dhd->pub.mac);
2483 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2485 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2487 bool _apply = FALSE;
2488 /* In case of IBSS mode, apply arp pkt filter */
2489 if (op_mode & DHD_FLAG_IBSS_MODE) {
2493 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2494 if ((dhd->arp_version == 1) &&
2495 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2503 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2505 void dhd_set_packet_filter(dhd_pub_t *dhd)
2507 #ifdef PKT_FILTER_SUPPORT
2510 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2511 if (dhd_pkt_filter_enable) {
2512 for (i = 0; i < dhd->pktfilter_count; i++) {
2513 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2516 #endif /* PKT_FILTER_SUPPORT */
2519 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2521 #ifdef PKT_FILTER_SUPPORT
2524 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2526 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2527 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2530 /* 1 - Enable packet filter; only allow unicast packets to be sent up */
2531 /* 0 - Disable packet filter */
2532 if (dhd_pkt_filter_enable && (!value ||
2533 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2535 for (i = 0; i < dhd->pktfilter_count; i++) {
2536 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2537 if (value && (i == DHD_ARP_FILTER_NUM) &&
2538 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2539 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2540 "val %d, cnt %d, op_mode 0x%x\n",
2541 value, i, dhd->op_mode));
2544 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2545 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2546 value, dhd_master_mode);
2549 #endif /* PKT_FILTER_SUPPORT */
2552 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2554 #ifndef SUPPORT_PM2_ONLY
2555 int power_mode = PM_MAX;
2556 #endif /* SUPPORT_PM2_ONLY */
2557 #ifdef SUPPORT_SENSORHUB
2559 #endif /* SUPPORT_SENSORHUB */
2560 /* wl_pkt_filter_enable_t enable_parm; */
2562 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2563 #ifdef DHD_USE_EARLYSUSPEND
2564 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2565 int bcn_timeout = 0;
2566 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2567 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2568 int roam_time_thresh = 0; /* (ms) */
2569 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2570 #ifndef ENABLE_FW_ROAM_SUSPEND
2571 uint roamvar = dhd->conf->roam_off_suspend;
2572 #endif /* ENABLE_FW_ROAM_SUSPEND */
2573 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2575 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2576 uint nd_ra_filter = 0;
2578 #endif /* DHD_USE_EARLYSUSPEND */
2579 #ifdef PASS_ALL_MCAST_PKTS
2580 struct dhd_info *dhdinfo;
2583 #endif /* PASS_ALL_MCAST_PKTS */
2584 #ifdef DYNAMIC_SWOOB_DURATION
2585 #ifndef CUSTOM_INTR_WIDTH
2586 #define CUSTOM_INTR_WIDTH 100
2588 #endif /* CUSTOM_INTR_WIDTH */
2589 #endif /* DYNAMIC_SWOOB_DURATION */
2594 #ifdef PASS_ALL_MCAST_PKTS
2595 dhdinfo = dhd->info;
2596 #endif /* PASS_ALL_MCAST_PKTS */
2598 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2599 __FUNCTION__, value, dhd->in_suspend));
2601 dhd_suspend_lock(dhd);
2603 #ifdef CUSTOM_SET_CPUCORE
2604 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2605 /* set specific cpucore */
2606 dhd_set_cpucore(dhd, TRUE);
2607 #endif /* CUSTOM_SET_CPUCORE */
2608 #ifndef SUPPORT_PM2_ONLY
2609 if (dhd->conf->pm >= 0)
2610 power_mode = dhd->conf->pm;
2611 #endif /* SUPPORT_PM2_ONLY */
2613 if (value && dhd->in_suspend) {
2614 #ifdef PKT_FILTER_SUPPORT
2615 dhd->early_suspended = 1;
2617 /* Kernel suspended */
2618 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
2620 #ifdef SUPPORT_SENSORHUB
2622 if (dhd->info->shub_enable == 1) {
2623 bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
2624 iovbuf, sizeof(iovbuf));
2625 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2626 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2627 DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
2628 __FUNCTION__, ret));
2631 #endif /* SUPPORT_SENSORHUB */
2633 #ifndef SUPPORT_PM2_ONLY
2634 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2635 sizeof(power_mode), TRUE, 0);
2636 #endif /* SUPPORT_PM2_ONLY */
2638 #ifdef PKT_FILTER_SUPPORT
2639 /* Enable packet filter,
2640 * only allow unicast packets to be sent up
2642 dhd_enable_packet_filter(1, dhd);
2643 #endif /* PKT_FILTER_SUPPORT */
2645 #ifdef PASS_ALL_MCAST_PKTS
2647 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2648 iovbuf, sizeof(iovbuf));
2649 for (i = 0; i < DHD_MAX_IFS; i++) {
2650 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2651 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2652 sizeof(iovbuf), TRUE, i);
2654 #endif /* PASS_ALL_MCAST_PKTS */
2656 /* If DTIM skip is set up as default, force it to wake
2657 * each third DTIM for better power savings. Note that
2658 * one side effect is a chance of missing BC/MC packets.
2661 /* Do not set bcn_li_dtim in WFD mode */
2662 if (dhd->tdls_mode) {
2666 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2667 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2668 4, iovbuf, sizeof(iovbuf));
2669 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
2671 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2673 #ifdef DHD_USE_EARLYSUSPEND
2674 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2675 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2676 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2677 4, iovbuf, sizeof(iovbuf));
2678 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2679 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2680 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2681 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2682 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2683 4, iovbuf, sizeof(iovbuf));
2684 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2685 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2686 #ifndef ENABLE_FW_ROAM_SUSPEND
2687 /* Disable firmware roaming during suspend */
2688 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2689 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2690 #endif /* ENABLE_FW_ROAM_SUSPEND */
2691 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2693 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2694 4, iovbuf, sizeof(iovbuf));
2695 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2696 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2697 if (FW_SUPPORTED(dhd, ndoe)) {
2698 /* enable IPv6 RA filter in firmware during suspend */
2700 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2701 iovbuf, sizeof(iovbuf));
2702 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2703 sizeof(iovbuf), TRUE, 0)) < 0)
2704 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2707 #ifdef DYNAMIC_SWOOB_DURATION
2708 intr_width = CUSTOM_INTR_WIDTH;
2709 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2710 iovbuf, sizeof(iovbuf));
2711 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2712 sizeof(iovbuf), TRUE, 0)) < 0) {
2713 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2715 #endif /* DYNAMIC_SWOOB_DURATION */
2716 #endif /* DHD_USE_EARLYSUSPEND */
2718 #ifdef PKT_FILTER_SUPPORT
2719 dhd->early_suspended = 0;
2721 /* Kernel resumed */
2722 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
2724 #ifdef SUPPORT_SENSORHUB
2726 if (dhd->info->shub_enable == 1) {
2727 bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
2728 4, iovbuf, sizeof(iovbuf));
2729 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2730 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2731 DHD_ERROR(("%s Sensor Hub move/stop stop:"
2732 "failed %d\n", __FUNCTION__, ret));
2735 #endif /* SUPPORT_SENSORHUB */
2738 #ifdef DYNAMIC_SWOOB_DURATION
2740 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2741 iovbuf, sizeof(iovbuf));
2742 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2743 sizeof(iovbuf), TRUE, 0)) < 0) {
2744 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2746 #endif /* DYNAMIC_SWOOB_DURATION */
2747 #ifndef SUPPORT_PM2_ONLY
2748 power_mode = PM_FAST;
2749 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2750 sizeof(power_mode), TRUE, 0);
2751 #endif /* SUPPORT_PM2_ONLY */
2752 #ifdef PKT_FILTER_SUPPORT
2753 /* disable pkt filter */
2754 dhd_enable_packet_filter(0, dhd);
2755 #endif /* PKT_FILTER_SUPPORT */
2756 #ifdef PASS_ALL_MCAST_PKTS
2758 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2759 iovbuf, sizeof(iovbuf));
2760 for (i = 0; i < DHD_MAX_IFS; i++) {
2761 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2762 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2763 sizeof(iovbuf), TRUE, i);
2765 #endif /* PASS_ALL_MCAST_PKTS */
2767 /* restore pre-suspend setting for dtim_skip */
2768 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2769 4, iovbuf, sizeof(iovbuf));
2771 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2772 #ifdef DHD_USE_EARLYSUSPEND
2773 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2774 bcn_timeout = CUSTOM_BCN_TIMEOUT;
2775 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2776 4, iovbuf, sizeof(iovbuf));
2777 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2778 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2779 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2780 roam_time_thresh = 2000;
2781 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2782 4, iovbuf, sizeof(iovbuf));
2783 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2784 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2785 #ifndef ENABLE_FW_ROAM_SUSPEND
2786 roamvar = dhd_roam_disable;
2787 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2788 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2789 #endif /* ENABLE_FW_ROAM_SUSPEND */
2790 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2792 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2793 4, iovbuf, sizeof(iovbuf));
2794 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2795 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2796 if (FW_SUPPORTED(dhd, ndoe)) {
2797 /* disable the IPv6 RA filter in firmware upon resume */
2799 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2800 iovbuf, sizeof(iovbuf));
2801 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2802 sizeof(iovbuf), TRUE, 0)) < 0)
2803 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2806 #endif /* DHD_USE_EARLYSUSPEND */
2809 dhd_suspend_unlock(dhd);
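/*
 * Sketch only, not part of the driver: dhd_set_suspend() repeats one pattern many
 * times above, i.e. pack an integer firmware variable with bcm_mkiovar() and push
 * it with dhd_wl_ioctl_cmd(WLC_SET_VAR). A hypothetical helper showing that
 * pattern, assuming only the call signatures already used in this function:
 */
static int dhd_iovar_set_int_sketch(dhd_pub_t *dhd, char *name, int val)
{
	char iovbuf[32];
	int ret;

	if (!bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf)))
		return BCME_ERROR;	/* name + value did not fit into iovbuf */

	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (ret < 0)
		DHD_ERROR(("%s: setting %s failed (%d)\n", __FUNCTION__, name, ret));
	return ret;
}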
2814 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2816 dhd_pub_t *dhdp = &dhd->pub;
2819 DHD_OS_WAKE_LOCK(dhdp);
2820 DHD_PERIM_LOCK(dhdp);
2822 /* Set flag when early suspend was called */
2823 dhdp->in_suspend = val;
2824 if ((force || !dhdp->suspend_disable_flag) &&
2825 dhd_support_sta_mode(dhdp))
2827 ret = dhd_set_suspend(val, dhdp);
2830 DHD_PERIM_UNLOCK(dhdp);
2831 DHD_OS_WAKE_UNLOCK(dhdp);
2835 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2836 static void dhd_early_suspend(struct early_suspend *h)
2838 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2839 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2842 dhd_suspend_resume_helper(dhd, 1, 0);
2845 static void dhd_late_resume(struct early_suspend *h)
2847 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2848 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2851 dhd_suspend_resume_helper(dhd, 0, 0);
2853 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2856 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2857 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2859 * dhd_timeout_start(&tmo, usec);
2860 * while (!dhd_timeout_expired(&tmo))
2861 * if (poll_something())
2863 * if (dhd_timeout_expired(&tmo))
2868 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2873 tmo->tick = jiffies_to_usecs(1);
2877 dhd_timeout_expired(dhd_timeout_t *tmo)
2879 /* Does nothing on the first call */
2880 if (tmo->increment == 0) {
2885 if (tmo->elapsed >= tmo->limit)
2888 /* Add the delay that's about to take place */
2889 tmo->elapsed += tmo->increment;
2891 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2892 OSL_DELAY(tmo->increment);
2893 tmo->increment *= 2;
2894 if (tmo->increment > tmo->tick)
2895 tmo->increment = tmo->tick;
2897 wait_queue_head_t delay_wait;
2898 DECLARE_WAITQUEUE(wait, current);
2899 init_waitqueue_head(&delay_wait);
2900 add_wait_queue(&delay_wait, &wait);
2901 set_current_state(TASK_INTERRUPTIBLE);
2902 (void)schedule_timeout(1);
2903 remove_wait_queue(&delay_wait, &wait);
2904 set_current_state(TASK_RUNNING);
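/*
 * Usage sketch, not part of the driver, following the pattern described in the
 * comment above dhd_timeout_start(). poll_something() and the 200 ms budget are
 * hypothetical.
 */
extern int poll_something(void);	/* hypothetical condition poll */

static int wait_for_condition_sketch(void)
{
	dhd_timeout_t tmo;

	dhd_timeout_start(&tmo, 200 * 1000);	/* budget given in microseconds */

	while (!dhd_timeout_expired(&tmo)) {
		if (poll_something())
			return 0;		/* condition met within the budget */
	}

	return -1;				/* budget exhausted */
}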
2911 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2916 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2920 while (i < DHD_MAX_IFS) {
2921 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2929 struct net_device * dhd_idx2net(void *pub, int ifidx)
2931 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2932 struct dhd_info *dhd_info;
2934 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2936 dhd_info = dhd_pub->info;
2937 if (dhd_info && dhd_info->iflist[ifidx])
2938 return dhd_info->iflist[ifidx]->net;
2943 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2945 int i = DHD_MAX_IFS;
2949 if (name == NULL || *name == '\0')
2953 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2956 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2958 return i; /* default - the primary interface */
2962 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2964 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2968 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2969 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2973 if (dhd->iflist[ifidx] == NULL) {
2974 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2978 if (dhd->iflist[ifidx]->net)
2979 return dhd->iflist[ifidx]->net->name;
2985 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2988 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2991 for (i = 0; i < DHD_MAX_IFS; i++)
2992 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2993 return dhd->iflist[i]->mac_addr;
3000 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3002 struct net_device *dev;
3003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3004 struct netdev_hw_addr *ha;
3006 struct dev_mc_list *mclist;
3008 uint32 allmulti, cnt;
3015 if (!dhd->iflist[ifidx]) {
3016 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3019 dev = dhd->iflist[ifidx]->net;
3022 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3023 netif_addr_lock_bh(dev);
3024 #endif /* LINUX >= 2.6.27 */
3025 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3026 cnt = netdev_mc_count(dev);
3028 cnt = dev->mc_count;
3029 #endif /* LINUX >= 2.6.35 */
3030 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3031 netif_addr_unlock_bh(dev);
3032 #endif /* LINUX >= 2.6.27 */
3034 /* Determine initial value of allmulti flag */
3035 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3037 #ifdef PASS_ALL_MCAST_PKTS
3038 #ifdef PKT_FILTER_SUPPORT
3039 if (!dhd->pub.early_suspended)
3040 #endif /* PKT_FILTER_SUPPORT */
3042 #endif /* PASS_ALL_MCAST_PKTS */
3044 /* Send down the multicast list first. */
3047 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3048 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3049 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3050 dhd_ifname(&dhd->pub, ifidx), cnt));
3054 strncpy(bufp, "mcast_list", buflen - 1);
3055 bufp[buflen - 1] = '\0';
3056 bufp += strlen("mcast_list") + 1;
3059 memcpy(bufp, &cnt, sizeof(cnt));
3060 bufp += sizeof(cnt);
3062 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3063 netif_addr_lock_bh(dev);
3064 #endif /* LINUX >= 2.6.27 */
3065 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3066 netdev_for_each_mc_addr(ha, dev) {
3069 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3070 bufp += ETHER_ADDR_LEN;
3073 #else /* LINUX < 2.6.35 */
3074 for (mclist = dev->mc_list; (mclist && (cnt > 0));
3075 cnt--, mclist = mclist->next) {
3076 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3077 bufp += ETHER_ADDR_LEN;
3079 #endif /* LINUX >= 2.6.35 */
3080 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3081 netif_addr_unlock_bh(dev);
3082 #endif /* LINUX >= 2.6.27 */
3084 memset(&ioc, 0, sizeof(ioc));
3085 ioc.cmd = WLC_SET_VAR;
3090 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3092 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3093 dhd_ifname(&dhd->pub, ifidx), cnt));
3094 allmulti = cnt ? TRUE : allmulti;
3097 MFREE(dhd->pub.osh, buf, buflen);
3099 /* Now send the allmulti setting. This is based on the setting in the
3100 * net_device flags, but might be modified above to be turned on if we
3101 * were trying to set some addresses and the dongle rejected it.
3104 buflen = sizeof("allmulti") + sizeof(allmulti);
3105 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
3106 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
3109 allmulti = htol32(allmulti);
3111 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
3112 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
3113 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
3114 MFREE(dhd->pub.osh, buf, buflen);
3119 memset(&ioc, 0, sizeof(ioc));
3120 ioc.cmd = WLC_SET_VAR;
3125 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3127 DHD_ERROR(("%s: set allmulti %d failed\n",
3128 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3131 MFREE(dhd->pub.osh, buf, buflen);
3133 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3135 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3137 allmulti = htol32(allmulti);
3139 memset(&ioc, 0, sizeof(ioc));
3140 ioc.cmd = WLC_SET_PROMISC;
3141 ioc.buf = &allmulti;
3142 ioc.len = sizeof(allmulti);
3145 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3147 DHD_ERROR(("%s: set promisc %d failed\n",
3148 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
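/*
 * Illustrative sketch, not part of the driver: the "mcast_list" buffer built
 * above has the layout  "mcast_list\0" | uint32 count | count * 6-byte MAC
 * addresses. A stand-alone model of that packing (names and integer byte order
 * are illustrative; the driver copies cnt in host order):
 */
#include <stdint.h>
#include <string.h>

#define MAC_LEN 6

static size_t pack_mcast_list(uint8_t *buf, size_t buflen,
                              const uint8_t (*addrs)[MAC_LEN], uint32_t cnt)
{
	static const char name[] = "mcast_list";
	size_t need = sizeof(name) + sizeof(cnt) + (size_t)cnt * MAC_LEN;
	uint8_t *p = buf;
	uint32_t i;

	if (buflen < need)
		return 0;		/* caller's buffer is too small */

	memcpy(p, name, sizeof(name));	/* iovar name, terminating NUL included */
	p += sizeof(name);
	memcpy(p, &cnt, sizeof(cnt));	/* address count */
	p += sizeof(cnt);
	for (i = 0; i < cnt; i++) {
		memcpy(p, addrs[i], MAC_LEN);
		p += MAC_LEN;
	}
	return need;			/* bytes written */
}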
3153 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3159 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3160 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3163 memset(&ioc, 0, sizeof(ioc));
3164 ioc.cmd = WLC_SET_VAR;
3169 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3171 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3173 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3175 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3182 extern struct net_device *ap_net_dev;
3183 extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
3187 /* Get psta/psr configuration */
3188 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3190 dhd_info_t *dhd = dhdp->info;
3191 return (int)dhd->psta_mode;
3193 /* Set psta/psr configuration */
3194 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3196 dhd_info_t *dhd = dhdp->info;
3197 dhd->psta_mode = val;
3200 #endif /* DHD_PSTA */
3203 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3205 dhd_info_t *dhd = handle;
3206 dhd_if_event_t *if_event = event_info;
3207 struct net_device *ndev;
3210 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3211 struct wireless_dev *vwdev, *primary_wdev;
3212 struct net_device *primary_ndev;
3213 #endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3215 if (event != DHD_WQ_WORK_IF_ADD) {
3216 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3221 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3226 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3230 dhd_net_if_lock_local(dhd);
3231 DHD_OS_WAKE_LOCK(&dhd->pub);
3232 DHD_PERIM_LOCK(&dhd->pub);
3234 ifidx = if_event->event.ifidx;
3235 bssidx = if_event->event.bssidx;
3236 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3238 /* This path is for the non-Android case. */
3239 /* The interface name on the host and in the event msg are the same; */
3240 /* the if name in the event msg is used to create the dongle if list on the host. */
3241 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3242 if_event->mac, bssidx, TRUE, if_event->name);
3244 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
3248 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3249 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
3250 if (unlikely(!vwdev)) {
3251 DHD_ERROR(("Could not allocate wireless device\n"));
3254 primary_ndev = dhd->pub.info->iflist[0]->net;
3255 primary_wdev = ndev_to_wdev(primary_ndev);
3256 vwdev->wiphy = primary_wdev->wiphy;
3257 vwdev->iftype = if_event->event.role;
3258 vwdev->netdev = ndev;
3259 ndev->ieee80211_ptr = vwdev;
3260 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
3261 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
3262 #endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3264 DHD_PERIM_UNLOCK(&dhd->pub);
3265 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3266 DHD_PERIM_LOCK(&dhd->pub);
3267 if (ret != BCME_OK) {
3268 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3269 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3272 #ifdef PCIE_FULL_DONGLE
3273 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3274 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3275 char iovbuf[WLC_IOCTL_SMLEN];
3278 memset(iovbuf, 0, sizeof(iovbuf));
3279 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
3280 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
3282 if (ret != BCME_OK) {
3283 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3284 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3287 #endif /* PCIE_FULL_DONGLE */
3290 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3292 DHD_PERIM_UNLOCK(&dhd->pub);
3293 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3294 dhd_net_if_unlock_local(dhd);
3298 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3300 dhd_info_t *dhd = handle;
3302 dhd_if_event_t *if_event = event_info;
3305 if (event != DHD_WQ_WORK_IF_DEL) {
3306 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3311 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3316 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3320 dhd_net_if_lock_local(dhd);
3321 DHD_OS_WAKE_LOCK(&dhd->pub);
3322 DHD_PERIM_LOCK(&dhd->pub);
3324 ifidx = if_event->event.ifidx;
3325 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3327 DHD_PERIM_UNLOCK(&dhd->pub);
3328 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3329 DHD_PERIM_LOCK(&dhd->pub);
3331 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3333 DHD_PERIM_UNLOCK(&dhd->pub);
3334 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3335 dhd_net_if_unlock_local(dhd);
3339 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3341 dhd_info_t *dhd = handle;
3342 dhd_if_t *ifp = event_info;
3344 if (event != DHD_WQ_WORK_SET_MAC) {
3345 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3349 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3353 dhd_net_if_lock_local(dhd);
3354 DHD_OS_WAKE_LOCK(&dhd->pub);
3355 DHD_PERIM_LOCK(&dhd->pub);
3359 unsigned long flags;
3361 DHD_GENERAL_LOCK(&dhd->pub, flags);
3362 in_ap = (ap_net_dev != NULL);
3363 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3366 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3373 if (ifp == NULL || !dhd->pub.up) {
3374 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3378 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3379 ifp->set_macaddress = FALSE;
3380 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3381 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3383 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3386 DHD_PERIM_UNLOCK(&dhd->pub);
3387 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3388 dhd_net_if_unlock_local(dhd);
3392 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3394 dhd_info_t *dhd = handle;
3395 dhd_if_t *ifp = event_info;
3398 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3399 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3404 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3408 dhd_net_if_lock_local(dhd);
3409 DHD_OS_WAKE_LOCK(&dhd->pub);
3410 DHD_PERIM_LOCK(&dhd->pub);
3415 unsigned long flags;
3416 DHD_GENERAL_LOCK(&dhd->pub, flags);
3417 in_ap = (ap_net_dev != NULL);
3418 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3421 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3423 ifp->set_multicast = FALSE;