2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2016, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
26 * <<Broadcom-WL-IPTag/Open:>>
28 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/inetdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/etherdevice.h>
48 #include <linux/random.h>
49 #include <linux/spinlock.h>
50 #include <linux/ethtool.h>
51 #include <linux/fcntl.h>
54 #include <linux/reboot.h>
55 #include <linux/notifier.h>
56 #include <net/addrconf.h>
57 #ifdef ENABLE_ADAPTIVE_SCHED
58 #include <linux/cpufreq.h>
59 #endif /* ENABLE_ADAPTIVE_SCHED */
61 #include <asm/uaccess.h>
62 #include <asm/unaligned.h>
66 #include <bcmendian.h>
69 #include <proto/ethernet.h>
70 #include <proto/bcmevent.h>
71 #include <proto/vlan.h>
72 #include <proto/802.3.h>
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
85 #ifdef CONFIG_HAS_WAKELOCK
86 #include <linux/wakelock.h>
89 #include <wl_cfg80211.h>
99 #include <linux/compat.h>
103 #include <dhd_wmf_linux.h>
107 #include <proto/bcmicmp.h>
108 #include <bcm_l2_filter.h>
109 #include <dhd_l2_filter.h>
110 #endif /* DHD_L2_FILTER */
113 #include <dhd_psta.h>
114 #endif /* DHD_PSTA */
117 #ifdef DHDTCPACK_SUPPRESS
119 #endif /* DHDTCPACK_SUPPRESS */
121 #ifdef DHD_DEBUG_PAGEALLOC
122 typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
123 void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
124 extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
125 #endif /* DHD_DEBUG_PAGEALLOC */
129 /* Dynamic CPU selection for load balancing */
130 #include <linux/cpu.h>
131 #include <linux/cpumask.h>
132 #include <linux/notifier.h>
133 #include <linux/workqueue.h>
134 #include <asm/atomic.h>
136 #if !defined(DHD_LB_PRIMARY_CPUS)
137 #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
140 #if !defined(DHD_LB_SECONDARY_CPUS)
141 #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
144 #define HIST_BIN_SIZE 8
146 #if defined(DHD_LB_RXP)
147 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
148 #endif /* DHD_LB_RXP */
153 #include <linux/time.h>
156 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
157 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
158 #define TSMAX 1000 /* max no. of timing record kept */
161 static uint32 tsidx = 0;
162 static uint32 htsf_seqnum = 0;
164 struct timeval tsync;
165 static uint32 tsport = 5010;
167 typedef struct histo_ {
171 #if !ISPOWEROF2(DHD_SDALIGN)
172 #error DHD_SDALIGN is not a power of 2!
175 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
176 #endif /* WLMEDIA_HTSF */
181 #endif /* quote_str */
184 #endif /* quote_str */
186 #define quote_str(s) to_str(s)
188 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
189 #endif /* STBLINUX */
193 extern bool ap_cfg_running;
194 extern bool ap_fw_loaded;
196 extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
198 #ifdef FIX_CPU_MIN_CLOCK
199 #include <linux/pm_qos.h>
200 #endif /* FIX_CPU_MIN_CLOCK */
201 #ifdef SET_RANDOM_MAC_SOFTAP
202 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
203 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
205 static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
206 #endif /* SET_RANDOM_MAC_SOFTAP */
207 #ifdef ENABLE_ADAPTIVE_SCHED
208 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
209 #ifndef CUSTOM_CPUFREQ_THRESH
210 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
211 #endif /* CUSTOM_CPUFREQ_THRESH */
212 #endif /* ENABLE_ADAPTIVE_SCHED */
214 /* enable HOSTIP cache update from the host side when an eth0:N is up */
215 #define AOE_IP_ALIAS_SUPPORT 1
219 #include <bcm_rpc_tp.h>
222 #include <wlfc_proto.h>
223 #include <dhd_wlfc.h>
226 #include <wl_android.h>
228 /* Maximum STA per radio */
229 #define DHD_MAX_STA 32
233 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
234 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
235 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
237 #ifdef ARP_OFFLOAD_SUPPORT
238 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
239 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
240 unsigned long event, void *ptr);
241 static struct notifier_block dhd_inetaddr_notifier = {
242 .notifier_call = dhd_inetaddr_notifier_call
244 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
245 * created in kernel notifier link list (with 'next' pointing to itself)
247 static bool dhd_inetaddr_notifier_registered = FALSE;
248 #endif /* ARP_OFFLOAD_SUPPORT */
250 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
251 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
252 unsigned long event, void *ptr);
253 static struct notifier_block dhd_inet6addr_notifier = {
254 .notifier_call = dhd_inet6addr_notifier_call
256 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
257 * created in kernel notifier link list (with 'next' pointing to itself)
259 static bool dhd_inet6addr_notifier_registered = FALSE;
260 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
262 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
263 #include <linux/suspend.h>
264 volatile bool dhd_mmc_suspend = FALSE;
265 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
266 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
268 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
269 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
271 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
272 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
275 MODULE_LICENSE("GPL and additional rights");
276 #endif /* LinuxVer */
281 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
283 #ifndef PROP_TXSTATUS
284 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
286 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
288 #endif /* BCM_FD_AGGR */
291 extern bool dhd_wlfc_skip_fc(void);
292 extern void dhd_wlfc_plat_init(void *dhd);
293 extern void dhd_wlfc_plat_deinit(void *dhd);
294 #endif /* PROP_TXSTATUS */
295 #ifdef USE_DYNAMIC_F2_BLKSIZE
296 extern uint sd_f2_blocksize;
297 extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
298 #endif /* USE_DYNAMIC_F2_BLKSIZE */
300 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
306 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
308 /* Linux wireless extension support */
309 #if defined(WL_WIRELESS_EXT)
311 extern wl_iw_extra_params_t g_wl_iw_params;
312 #endif /* defined(WL_WIRELESS_EXT) */
314 #ifdef CONFIG_PARTIALSUSPEND_SLP
315 #include <linux/partialsuspend_slp.h>
316 #define CONFIG_HAS_EARLYSUSPEND
317 #define DHD_USE_EARLYSUSPEND
318 #define register_early_suspend register_pre_suspend
319 #define unregister_early_suspend unregister_pre_suspend
320 #define early_suspend pre_suspend
321 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
323 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
324 #include <linux/earlysuspend.h>
325 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
326 #endif /* CONFIG_PARTIALSUSPEND_SLP */
328 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
330 #ifdef PKT_FILTER_SUPPORT
331 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
332 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
333 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
338 extern int dhd_read_macaddr(struct dhd_info *dhd);
340 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
343 extern int dhd_write_macaddr(struct ether_addr *mac);
345 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
352 #ifdef DHD_FW_COREDUMP
353 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
354 #endif /* DHD_FW_COREDUMP */
356 static void dhd_log_dump_init(dhd_pub_t *dhd);
357 static void dhd_log_dump_deinit(dhd_pub_t *dhd);
358 static void dhd_log_dump(void *handle, void *event_info, u8 event);
359 void dhd_schedule_log_dump(dhd_pub_t *dhdp);
360 static int do_dhd_log_dump(dhd_pub_t *dhdp);
361 #endif /* DHD_LOG_DUMP */
363 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
364 static struct notifier_block dhd_reboot_notifier = {
365 .notifier_call = dhd_reboot_callback,
370 static int is_reboot = 0;
373 typedef struct dhd_if_event {
374 struct list_head list;
375 wl_event_data_if_t event;
376 char name[IFNAMSIZ+1];
377 uint8 mac[ETHER_ADDR_LEN];
380 /* Interface control information */
381 typedef struct dhd_if {
382 struct dhd_info *info; /* back pointer to dhd_info */
383 /* OS/stack specifics */
384 struct net_device *net;
385 int idx; /* iface idx in dongle */
386 uint subunit; /* subunit */
387 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
390 uint8 bssidx; /* bsscfg index for the interface */
391 bool attached; /* Delayed attachment when unset */
392 bool txflowcontrol; /* Per interface flow control indicator */
393 char name[IFNAMSIZ+1]; /* linux interface name */
394 char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
395 struct net_device_stats stats;
397 dhd_wmf_t wmf; /* per bsscfg wmf setting */
399 #ifdef PCIE_FULL_DONGLE
400 struct list_head sta_list; /* sll of associated stations */
401 #if !defined(BCM_GMAC3)
402 spinlock_t sta_list_lock; /* lock for manipulating sll */
403 #endif /* ! BCM_GMAC3 */
404 #endif /* PCIE_FULL_DONGLE */
405 uint32 ap_isolate; /* ap-isolation settings */
410 arp_table_t *phnd_arp_table;
411 /* for Per BSS modification */
415 #endif /* DHD_L2_FILTER */
428 uint32 coef; /* scaling factor */
429 uint32 coefdec1; /* first decimal */
430 uint32 coefdec2; /* second decimal */
440 static tstamp_t ts[TSMAX];
441 static tstamp_t maxdelayts;
442 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
444 #endif /* WLMEDIA_HTSF */
446 struct ipv6_work_info_t {
453 typedef struct dhd_dump {
457 #endif /* DHD_DEBUG */
459 /* When Perimeter locks are deployed, any blocking calls must be preceeded
460 * with a PERIM UNLOCK and followed by a PERIM LOCK.
461 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
462 * wait_event_timeout().
465 /* Local private structure (extension of pub) */
466 typedef struct dhd_info {
467 #if defined(WL_WIRELESS_EXT)
468 wl_iw_t iw; /* wireless extensions state (must be first) */
469 #endif /* defined(WL_WIRELESS_EXT) */
471 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
473 void *adapter; /* adapter information, interrupt, fw path etc. */
474 char fw_path[PATH_MAX]; /* path to firmware image */
475 char nv_path[PATH_MAX]; /* path to nvram vars file */
476 char conf_path[PATH_MAX]; /* path to config vars file */
478 /* serialize dhd iovars */
479 struct mutex dhd_iovar_mutex;
481 struct semaphore proto_sem;
483 spinlock_t wlfc_spinlock;
485 #endif /* PROP_TXSTATUS */
489 wait_queue_head_t ioctl_resp_wait;
490 wait_queue_head_t d3ack_wait;
491 wait_queue_head_t dhd_bus_busy_state_wait;
492 uint32 default_wd_interval;
494 struct timer_list timer;
496 #ifdef DHD_PCIE_RUNTIMEPM
497 struct timer_list rpm_timer;
498 bool rpm_timer_valid;
499 tsk_ctl_t thr_rpm_ctl;
500 #endif /* DHD_PCIE_RUNTIMEPM */
501 struct tasklet_struct tasklet;
506 struct semaphore sdsem;
507 tsk_ctl_t thr_dpc_ctl;
508 tsk_ctl_t thr_wdt_ctl;
510 tsk_ctl_t thr_rxf_ctl;
512 bool rxthread_enabled;
515 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
516 struct wake_lock wl_wifi; /* Wifi wakelock */
517 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
518 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
519 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
520 struct wake_lock wl_evtwake; /* Wifi event wakelock */
521 #ifdef BCMPCIE_OOB_HOST_WAKE
522 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
523 #endif /* BCMPCIE_OOB_HOST_WAKE */
524 #ifdef DHD_USE_SCAN_WAKELOCK
525 struct wake_lock wl_scanwake; /* Wifi scan wakelock */
526 #endif /* DHD_USE_SCAN_WAKELOCK */
527 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
529 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
530 /* net_device interface lock, prevent race conditions among net_dev interface
531 * calls and wifi_on or wifi_off
533 struct mutex dhd_net_if_mutex;
534 struct mutex dhd_suspend_mutex;
536 spinlock_t wakelock_spinlock;
537 spinlock_t wakelock_evt_spinlock;
538 uint32 wakelock_event_counter;
539 uint32 wakelock_counter;
540 int wakelock_wd_counter;
541 int wakelock_rx_timeout_enable;
542 int wakelock_ctrl_timeout_enable;
544 uint32 wakelock_before_waive;
546 /* Thread to issue ioctl for multicast */
547 wait_queue_head_t ctrl_wait;
548 atomic_t pend_8021x_cnt;
549 dhd_attach_states_t dhd_state;
551 dhd_event_log_t event_data;
552 #endif /* SHOW_LOGTRACE */
554 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
555 struct early_suspend early_suspend;
556 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
558 #ifdef ARP_OFFLOAD_SUPPORT
560 #endif /* ARP_OFFLOAD_SUPPORT */
564 struct timer_list rpcth_timer;
565 bool rpcth_timer_active;
568 #ifdef DHDTCPACK_SUPPRESS
569 spinlock_t tcpack_lock;
570 #endif /* DHDTCPACK_SUPPRESS */
571 #ifdef FIX_CPU_MIN_CLOCK
572 bool cpufreq_fix_status;
573 struct mutex cpufreq_fix;
574 struct pm_qos_request dhd_cpu_qos;
575 #ifdef FIX_BUS_MIN_CLOCK
576 struct pm_qos_request dhd_bus_qos;
577 #endif /* FIX_BUS_MIN_CLOCK */
578 #endif /* FIX_CPU_MIN_CLOCK */
579 void *dhd_deferred_wq;
580 #ifdef DEBUG_CPU_FREQ
581 struct notifier_block freq_trans;
582 int __percpu *new_freq;
585 struct notifier_block pm_notifier;
587 uint32 psta_mode; /* PSTA or PSR */
588 #endif /* DHD_PSTA */
591 struct timer_list join_timer;
592 u32 join_timeout_val;
593 bool join_timer_active;
594 uint scan_time_count;
595 struct timer_list scan_timer;
596 bool scan_timer_active;
599 /* CPU Load Balance dynamic CPU selection */
601 /* Variable that tracks the currect CPUs available for candidacy */
602 cpumask_var_t cpumask_curr_avail;
604 /* Primary and secondary CPU mask */
605 cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
606 cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
608 struct notifier_block cpu_notifier;
610 /* Tasklet to handle Tx Completion packet freeing */
611 struct tasklet_struct tx_compl_tasklet;
612 atomic_t tx_compl_cpu;
615 /* Tasklet to handle RxBuf Post during Rx completion */
616 struct tasklet_struct rx_compl_tasklet;
617 atomic_t rx_compl_cpu;
619 /* Napi struct for handling rx packet sendup. Packets are removed from
620 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
621 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
622 * to run to rx_napi_cpu.
624 struct sk_buff_head rx_pend_queue ____cacheline_aligned;
625 struct sk_buff_head rx_napi_queue ____cacheline_aligned;
626 struct napi_struct rx_napi_struct ____cacheline_aligned;
627 atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
628 struct net_device *rx_napi_netdev; /* netdev of primary interface */
630 struct work_struct rx_napi_dispatcher_work;
631 struct work_struct tx_compl_dispatcher_work;
632 struct work_struct rx_compl_dispatcher_work;
633 /* Number of times DPC Tasklet ran */
636 /* Number of times NAPI processing got scheduled */
637 uint32 napi_sched_cnt;
639 /* Number of times NAPI processing ran on each available core */
640 uint32 napi_percpu_run_cnt[NR_CPUS];
642 /* Number of times RX Completions got scheduled */
643 uint32 rxc_sched_cnt;
644 /* Number of times RX Completion ran on each available core */
645 uint32 rxc_percpu_run_cnt[NR_CPUS];
647 /* Number of times TX Completions got scheduled */
648 uint32 txc_sched_cnt;
649 /* Number of times TX Completions ran on each available core */
650 uint32 txc_percpu_run_cnt[NR_CPUS];
653 /* Number of times each CPU came online */
654 uint32 cpu_online_cnt[NR_CPUS];
656 /* Number of times each CPU went offline */
657 uint32 cpu_offline_cnt[NR_CPUS];
660 * Consumer Histogram - NAPI RX Packet processing
661 * -----------------------------------------------
662 * On Each CPU, when the NAPI RX Packet processing call back was invoked
663 * how many packets were processed is captured in this data structure.
664 * Now its difficult to capture the "exact" number of packets processed.
665 * So considering the packet counter to be a 32 bit one, we have a
666 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
667 * processed is rounded off to the next power of 2 and put in the
668 * approriate "bin" the value in the bin gets incremented.
669 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
670 * and the packet count processed is as follows (assume the bin counters are 0)
671 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
672 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
673 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
675 uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
676 uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
677 uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
680 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
681 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
683 struct kobject dhd_kobj;
684 #ifdef SUPPORT_SENSORHUB
686 #endif /* SUPPORT_SENSORHUB */
688 struct delayed_work dhd_memdump_work;
691 #define DHDIF_FWDER(dhdif) FALSE
693 /* Flag to indicate if we should download firmware on driver load */
694 uint dhd_download_fw_on_driverload = TRUE;
696 /* Flag to indicate if driver is initialized */
697 uint dhd_driver_init_done = FALSE;
699 /* Definitions to provide path to the firmware and nvram
700 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
702 char firmware_path[MOD_PARAM_PATHLEN];
703 char nvram_path[MOD_PARAM_PATHLEN];
704 char config_path[MOD_PARAM_PATHLEN];
706 /* backup buffer for firmware and nvram path */
707 char fw_bak_path[MOD_PARAM_PATHLEN];
708 char nv_bak_path[MOD_PARAM_PATHLEN];
710 /* information string to keep firmware, chio, cheip version info visiable from log */
711 char info_string[MOD_PARAM_INFOLEN];
712 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
714 int disable_proptx = 0;
715 module_param(op_mode, int, 0644);
717 #if defined(DHD_LB_RXP)
718 static int dhd_napi_weight = 32;
719 module_param(dhd_napi_weight, int, 0644);
720 #endif /* DHD_LB_RXP */
722 extern int wl_control_wl_start(struct net_device *dev);
723 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
724 struct semaphore dhd_registration_sem;
725 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
727 /* deferred handlers */
728 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
729 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
730 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
731 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
732 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
733 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
734 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
736 extern void dhd_netdev_free(struct net_device *ndev);
737 #endif /* WL_CFG80211 */
740 module_param(dhd_msg_level, int, 0);
741 #if defined(WL_WIRELESS_EXT)
742 module_param(iw_msg_level, int, 0);
745 module_param(wl_dbg_level, int, 0);
747 module_param(android_msg_level, int, 0);
748 module_param(config_msg_level, int, 0);
750 #ifdef ARP_OFFLOAD_SUPPORT
751 /* ARP offload enable */
752 uint dhd_arp_enable = TRUE;
753 module_param(dhd_arp_enable, uint, 0);
755 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
757 #ifdef ENABLE_ARP_SNOOP_MODE
758 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
760 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
761 #endif /* ENABLE_ARP_SNOOP_MODE */
763 module_param(dhd_arp_mode, uint, 0);
764 #endif /* ARP_OFFLOAD_SUPPORT */
766 /* Disable Prop tx */
767 module_param(disable_proptx, int, 0644);
768 /* load firmware and/or nvram values from the filesystem */
769 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
770 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
771 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
773 /* Watchdog interval */
775 /* extend watchdog expiration to 2 seconds when DPC is running */
776 #define WATCHDOG_EXTEND_INTERVAL (2000)
778 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
779 module_param(dhd_watchdog_ms, uint, 0);
781 #ifdef DHD_PCIE_RUNTIMEPM
782 uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
783 #endif /* DHD_PCIE_RUNTIMEPMT */
784 #if defined(DHD_DEBUG)
785 /* Console poll interval */
786 uint dhd_console_ms = 0;
787 module_param(dhd_console_ms, uint, 0644);
788 #endif /* defined(DHD_DEBUG) */
791 uint dhd_slpauto = TRUE;
792 module_param(dhd_slpauto, uint, 0);
794 #ifdef PKT_FILTER_SUPPORT
795 /* Global Pkt filter enable control */
796 uint dhd_pkt_filter_enable = TRUE;
797 module_param(dhd_pkt_filter_enable, uint, 0);
800 /* Pkt filter init setup */
801 uint dhd_pkt_filter_init = 0;
802 module_param(dhd_pkt_filter_init, uint, 0);
804 /* Pkt filter mode control */
805 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
806 uint dhd_master_mode = FALSE;
808 uint dhd_master_mode = FALSE;
809 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
810 module_param(dhd_master_mode, uint, 0);
812 int dhd_watchdog_prio = 0;
813 module_param(dhd_watchdog_prio, int, 0);
815 /* DPC thread priority */
816 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
817 module_param(dhd_dpc_prio, int, 0);
819 /* RX frame thread priority */
820 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
821 module_param(dhd_rxf_prio, int, 0);
823 int passive_channel_skip = 0;
824 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
826 #if !defined(BCMDHDUSB)
827 extern int dhd_dongle_ramsize;
828 module_param(dhd_dongle_ramsize, int, 0);
829 #endif /* BCMDHDUSB */
831 /* Keep track of number of instances */
832 static int dhd_found = 0;
833 static int instance_base = 0; /* Starting instance number */
834 module_param(instance_base, int, 0644);
836 /* Functions to manage sysfs interface for dhd */
837 static int dhd_sysfs_init(dhd_info_t *dhd);
838 static void dhd_sysfs_exit(dhd_info_t *dhd);
843 dhd_lb_set_default_cpus(dhd_info_t *dhd)
845 /* Default CPU allocation for the jobs */
846 atomic_set(&dhd->rx_napi_cpu, 1);
847 atomic_set(&dhd->rx_compl_cpu, 2);
848 atomic_set(&dhd->tx_compl_cpu, 2);
852 dhd_cpumasks_deinit(dhd_info_t *dhd)
854 free_cpumask_var(dhd->cpumask_curr_avail);
855 free_cpumask_var(dhd->cpumask_primary);
856 free_cpumask_var(dhd->cpumask_primary_new);
857 free_cpumask_var(dhd->cpumask_secondary);
858 free_cpumask_var(dhd->cpumask_secondary_new);
862 dhd_cpumasks_init(dhd_info_t *dhd)
868 if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
869 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
870 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
871 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
872 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
873 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
878 cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
879 cpumask_clear(dhd->cpumask_primary);
880 cpumask_clear(dhd->cpumask_secondary);
882 cpus = DHD_LB_PRIMARY_CPUS;
883 for (id = 0; id < NR_CPUS; id++) {
884 if (isset(&cpus, id))
885 cpumask_set_cpu(id, dhd->cpumask_primary);
888 cpus = DHD_LB_SECONDARY_CPUS;
889 for (id = 0; id < NR_CPUS; id++) {
890 if (isset(&cpus, id))
891 cpumask_set_cpu(id, dhd->cpumask_secondary);
896 dhd_cpumasks_deinit(dhd);
901 * The CPU Candidacy Algorithm
902 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
903 * The available CPUs for selection are divided into two groups
904 * Primary Set - A CPU mask that carries the First Choice CPUs
905 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
907 * There are two types of Job, that needs to be assigned to
908 * the CPUs, from one of the above mentioned CPU group. The Jobs are
909 * 1) Rx Packet Processing - napi_cpu
910 * 2) Completion Processiong (Tx, RX) - compl_cpu
912 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
913 * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
914 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
915 * If there are more processors free, it assigns one to compl_cpu.
916 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
917 * CPU, as much as possible.
919 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
920 * would allow Tx completion skb's to be released into a local free pool from
921 * which the rx buffer posts could have been serviced. it is important to note
922 * that a Tx packet may not have a large enough buffer for rx posting.
924 void dhd_select_cpu_candidacy(dhd_info_t *dhd)
926 uint32 primary_available_cpus; /* count of primary available cpus */
927 uint32 secondary_available_cpus; /* count of secondary available cpus */
928 uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
929 uint32 compl_cpu = 0; /* cpu selected for completion jobs */
931 cpumask_clear(dhd->cpumask_primary_new);
932 cpumask_clear(dhd->cpumask_secondary_new);
935 * Now select from the primary mask. Even if a Job is
936 * already running on a CPU in secondary group, we still move
937 * to primary CPU. So no conditional checks.
939 cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
940 dhd->cpumask_curr_avail);
942 cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
943 dhd->cpumask_curr_avail);
945 primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
947 if (primary_available_cpus > 0) {
948 napi_cpu = cpumask_first(dhd->cpumask_primary_new);
950 /* If no further CPU is available,
951 * cpumask_next returns >= nr_cpu_ids
953 compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
954 if (compl_cpu >= nr_cpu_ids)
958 DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
959 __FUNCTION__, napi_cpu, compl_cpu));
961 /* -- Now check for the CPUs from the secondary mask -- */
962 secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
964 DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
965 __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
967 if (secondary_available_cpus > 0) {
968 /* At this point if napi_cpu is unassigned it means no CPU
969 * is online from Primary Group
972 napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
973 compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
974 } else if (compl_cpu == 0) {
975 compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
978 /* If no CPU was available for completion, choose CPU 0 */
979 if (compl_cpu >= nr_cpu_ids)
982 if ((primary_available_cpus == 0) &&
983 (secondary_available_cpus == 0)) {
984 /* No CPUs available from primary or secondary mask */
989 DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
990 __FUNCTION__, napi_cpu, compl_cpu));
991 ASSERT(napi_cpu < nr_cpu_ids);
992 ASSERT(compl_cpu < nr_cpu_ids);
994 atomic_set(&dhd->rx_napi_cpu, napi_cpu);
995 atomic_set(&dhd->tx_compl_cpu, compl_cpu);
996 atomic_set(&dhd->rx_compl_cpu, compl_cpu);
1001 * Function to handle CPU Hotplug notifications.
1002 * One of the task it does is to trigger the CPU Candidacy algorithm
1003 * for load balancing.
1006 dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1008 unsigned int cpu = (unsigned int)(long)hcpu;
1010 dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1015 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1016 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1017 dhd_select_cpu_candidacy(dhd);
1020 case CPU_DOWN_PREPARE:
1021 case CPU_DOWN_PREPARE_FROZEN:
1022 DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1023 cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1024 dhd_select_cpu_candidacy(dhd);
1033 #if defined(DHD_LB_STATS)
1034 void dhd_lb_stats_init(dhd_pub_t *dhdp)
1040 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1047 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1051 DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1052 DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1053 DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1054 DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1056 for (i = 0; i < NR_CPUS; i++) {
1057 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1058 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1059 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1061 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1062 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1065 for (i = 0; i < NR_CPUS; i++) {
1066 for (j = 0; j < HIST_BIN_SIZE; j++) {
1067 DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1068 DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1069 DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1076 static void dhd_lb_stats_dump_histo(
1077 struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1080 uint32 per_cpu_total[NR_CPUS] = {0};
1083 bcm_bprintf(strbuf, "CPU: \t\t");
1084 for (i = 0; i < num_possible_cpus(); i++)
1085 bcm_bprintf(strbuf, "%d\t", i);
1086 bcm_bprintf(strbuf, "\nBin\n");
1088 for (i = 0; i < HIST_BIN_SIZE; i++) {
1089 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1090 for (j = 0; j < num_possible_cpus(); j++) {
1091 bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1093 bcm_bprintf(strbuf, "\n");
1095 bcm_bprintf(strbuf, "Per CPU Total \t");
1097 for (i = 0; i < num_possible_cpus(); i++) {
1098 for (j = 0; j < HIST_BIN_SIZE; j++) {
1099 per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1101 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1102 total += per_cpu_total[i];
1104 bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1109 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1113 bcm_bprintf(strbuf, "CPU: \t");
1114 for (i = 0; i < num_possible_cpus(); i++)
1115 bcm_bprintf(strbuf, "%d\t", i);
1116 bcm_bprintf(strbuf, "\n");
1118 bcm_bprintf(strbuf, "Val: \t");
1119 for (i = 0; i < num_possible_cpus(); i++)
1120 bcm_bprintf(strbuf, "%u\t", *(p+i));
1121 bcm_bprintf(strbuf, "\n");
1125 void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
1129 if (dhdp == NULL || strbuf == NULL) {
1130 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1131 __FUNCTION__, dhdp, strbuf));
1137 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1141 bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
1142 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
1144 bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
1145 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
1147 bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1148 dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
1149 dhd->txc_sched_cnt);
1151 bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
1152 dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
1153 bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
1154 dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
1155 #endif /* DHD_LB_RXP */
1158 bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
1159 dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
1160 bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
1161 dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
1162 #endif /* DHD_LB_RXC */
1166 bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
1167 dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
1168 bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
1169 dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
1170 #endif /* DHD_LB_TXC */
1173 static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1178 bin_power = next_larger_power2(count);
1180 switch (bin_power) {
1182 case 1: /* Fall through intentionally */
1183 case 2: p = bin + 0; break;
1184 case 4: p = bin + 1; break;
1185 case 8: p = bin + 2; break;
1186 case 16: p = bin + 3; break;
1187 case 32: p = bin + 4; break;
1188 case 64: p = bin + 5; break;
1189 case 128: p = bin + 6; break;
1190 default : p = bin + 7; break;
1197 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1200 dhd_info_t *dhd = dhdp->info;
1204 dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1209 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1212 dhd_info_t *dhd = dhdp->info;
1216 dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1221 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1224 dhd_info_t *dhd = dhdp->info;
1228 dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1233 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
1235 dhd_info_t *dhd = dhdp->info;
1236 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
1239 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
1241 dhd_info_t *dhd = dhdp->info;
1242 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
1245 #endif /* DHD_LB_STATS */
1249 #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
1250 int g_frameburst = 1;
1251 #endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
1253 static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
1255 /* DHD Perimiter lock only used in router with bypass forwarding. */
1256 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
1257 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
1258 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1260 #ifdef PCIE_FULL_DONGLE
1261 #if defined(BCM_GMAC3)
1262 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
1263 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1264 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1266 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1267 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
1268 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
1269 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1271 #else /* ! BCM_GMAC3 */
1272 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1273 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1274 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1275 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1276 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1278 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1279 static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
1280 struct list_head *snapshot_list);
1281 static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
1282 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1283 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1284 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1286 #endif /* ! BCM_GMAC3 */
1287 #endif /* PCIE_FULL_DONGLE */
1289 /* Control fw roaming */
1290 uint dhd_roam_disable = 0;
1293 extern int dhd_dbg_init(dhd_pub_t *dhdp);
1294 extern void dhd_dbg_remove(void);
1297 /* Control radio state */
1298 uint dhd_radio_up = 1;
1300 /* Network inteface name */
1301 char iface_name[IFNAMSIZ] = {'\0'};
1302 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
1304 /* The following are specific to the SDIO dongle */
1306 /* IOCTL response timeout */
1307 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
1309 /* Idle timeout for backplane clock */
1310 int dhd_idletime = DHD_IDLETIME_TICKS;
1311 module_param(dhd_idletime, int, 0);
1314 uint dhd_poll = FALSE;
1315 module_param(dhd_poll, uint, 0);
1317 /* Use interrupts */
1318 uint dhd_intr = TRUE;
1319 module_param(dhd_intr, uint, 0);
1321 /* SDIO Drive Strength (in milliamps) */
1322 uint dhd_sdiod_drive_strength = 6;
1323 module_param(dhd_sdiod_drive_strength, uint, 0);
1327 extern uint dhd_txbound;
1328 extern uint dhd_rxbound;
1329 module_param(dhd_txbound, uint, 0);
1330 module_param(dhd_rxbound, uint, 0);
1332 /* Deferred transmits */
1333 extern uint dhd_deferred_tx;
1334 module_param(dhd_deferred_tx, uint, 0);
1336 #endif /* BCMSDIO */
1340 /* Echo packet generator (pkts/s) */
1341 uint dhd_pktgen = 0;
1342 module_param(dhd_pktgen, uint, 0);
1344 /* Echo packet len (0 => sawtooth, max 2040) */
1345 uint dhd_pktgen_len = 0;
1346 module_param(dhd_pktgen_len, uint, 0);
1351 /* Allow delayed firmware download for debug purpose */
1352 int allow_delay_fwdl = FALSE;
1353 module_param(allow_delay_fwdl, int, 0);
1355 extern char dhd_version[];
1356 extern char fw_version[];
1358 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
1359 static void dhd_net_if_lock_local(dhd_info_t *dhd);
1360 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
1361 static void dhd_suspend_lock(dhd_pub_t *dhdp);
1362 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
1365 void htsf_update(dhd_info_t *dhd, void *data);
1366 tsf_t prev_tsf, cur_tsf;
1368 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
1369 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
1370 static void dhd_dump_latency(void);
1371 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
1372 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
1373 static void dhd_dump_htsfhisto(histo_t *his, char *s);
1374 #endif /* WLMEDIA_HTSF */
1376 /* Monitor interface */
1377 int dhd_monitor_init(void *dhd_pub);
1378 int dhd_monitor_uninit(void);
1381 #if defined(WL_WIRELESS_EXT)
1382 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
1383 #endif /* defined(WL_WIRELESS_EXT) */
1385 static void dhd_dpc(ulong data);
1387 extern int dhd_wait_pend8021x(struct net_device *dev);
1388 void dhd_os_wd_timer_extend(void *bus, bool extend);
1392 #error TOE requires BDC
1394 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
1395 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
1398 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
1399 wl_event_msg_t *event_ptr, void **data_ptr);
1401 #if defined(CONFIG_PM_SLEEP)
1402 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
1404 int ret = NOTIFY_DONE;
1405 bool suspend = FALSE;
1406 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
1408 BCM_REFERENCE(dhdinfo);
1411 case PM_HIBERNATION_PREPARE:
1412 case PM_SUSPEND_PREPARE:
1416 case PM_POST_HIBERNATION:
1417 case PM_POST_SUSPEND:
1422 #if defined(SUPPORT_P2P_GO_PS)
1423 #ifdef PROP_TXSTATUS
1425 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
1426 dhd_wlfc_suspend(&dhdinfo->pub);
1427 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
1429 dhd_wlfc_resume(&dhdinfo->pub);
1430 #endif /* PROP_TXSTATUS */
1431 #endif /* defined(SUPPORT_P2P_GO_PS) */
1433 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1434 KERNEL_VERSION(2, 6, 39))
1435 dhd_mmc_suspend = suspend;
1442 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1443 * created in kernel notifier link list (with 'next' pointing to itself)
1445 static bool dhd_pm_notifier_registered = FALSE;
1447 extern int register_pm_notifier(struct notifier_block *nb);
1448 extern int unregister_pm_notifier(struct notifier_block *nb);
1449 #endif /* CONFIG_PM_SLEEP */
1451 /* Request scheduling of the bus rx frame */
1452 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
1453 static void dhd_os_rxflock(dhd_pub_t *pub);
1454 static void dhd_os_rxfunlock(dhd_pub_t *pub);
1456 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1457 typedef struct dhd_dev_priv {
1458 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
1459 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
1460 int ifidx; /* interface index */
1463 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
1464 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
1465 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1466 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1467 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1469 /** Clear the dhd net_device's private structure. */
1471 dhd_dev_priv_clear(struct net_device * dev)
1473 dhd_dev_priv_t * dev_priv;
1474 ASSERT(dev != (struct net_device *)NULL);
1475 dev_priv = DHD_DEV_PRIV(dev);
1476 dev_priv->dhd = (dhd_info_t *)NULL;
1477 dev_priv->ifp = (dhd_if_t *)NULL;
1478 dev_priv->ifidx = DHD_BAD_IF;
1481 /** Setup the dhd net_device's private structure. */
1483 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1486 dhd_dev_priv_t * dev_priv;
1487 ASSERT(dev != (struct net_device *)NULL);
1488 dev_priv = DHD_DEV_PRIV(dev);
1489 dev_priv->dhd = dhd;
1490 dev_priv->ifp = ifp;
1491 dev_priv->ifidx = ifidx;
1494 #ifdef PCIE_FULL_DONGLE
1496 /** Dummy objects are defined with state representing bad|down.
1497 * Performance gains from reducing branch conditionals, instruction parallelism,
1498 * dual issue, reducing load shadows, avail of larger pipelines.
1499 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1500 * is accessed via the dhd_sta_t.
1503 /* Dummy dhd_info object */
1504 dhd_info_t dhd_info_null = {
1505 #if defined(BCM_GMAC3)
1509 .info = &dhd_info_null,
1510 #ifdef DHDTCPACK_SUPPRESS
1511 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1512 #endif /* DHDTCPACK_SUPPRESS */
1514 .busstate = DHD_BUS_DOWN
1517 #define DHD_INFO_NULL (&dhd_info_null)
1518 #define DHD_PUB_NULL (&dhd_info_null.pub)
1520 /* Dummy netdevice object */
1521 struct net_device dhd_net_dev_null = {
1522 .reg_state = NETREG_UNREGISTERED
1524 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1526 /* Dummy dhd_if object */
1527 dhd_if_t dhd_if_null = {
1528 #if defined(BCM_GMAC3)
1532 .wmf = { .wmf_enable = TRUE },
1534 .info = DHD_INFO_NULL,
1535 .net = DHD_NET_DEV_NULL,
1538 #define DHD_IF_NULL (&dhd_if_null)
1540 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
1542 /** Interface STA list management. */
1544 /** Fetch the dhd_if object, given the interface index in the dhd. */
1545 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1547 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1548 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1549 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1551 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1552 static void dhd_if_del_sta_list(dhd_if_t * ifp);
1553 static void dhd_if_flush_sta(dhd_if_t * ifp);
1555 /* Construct/Destruct a sta pool. */
1556 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1557 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1558 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1559 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1562 /* Return interface pointer */
1563 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1565 ASSERT(ifidx < DHD_MAX_IFS);
1567 if (ifidx >= DHD_MAX_IFS)
1570 return dhdp->info->iflist[ifidx];
1573 /** Reset a dhd_sta object and free into the dhd pool. */
1575 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1579 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1581 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1584 * Flush and free all packets in all flowring's queues belonging to sta.
1585 * Packets in flow ring will be flushed later.
1587 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1588 uint16 flowid = sta->flowid[prio];
1590 if (flowid != FLOWID_INVALID) {
1591 unsigned long flags;
1592 flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
1593 flow_ring_node_t * flow_ring_node;
1595 #ifdef DHDTCPACK_SUPPRESS
1596 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1597 * when there is a newly coming packet from network stack.
1599 dhd_tcpack_info_tbl_clean(dhdp);
1600 #endif /* DHDTCPACK_SUPPRESS */
1602 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1603 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1604 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1606 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1608 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
1609 PKTFREE(dhdp->osh, pkt, TRUE);
1613 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1614 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1617 sta->flowid[prio] = FLOWID_INVALID;
1620 id16_map_free(dhdp->staid_allocator, sta->idx);
1621 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1622 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1623 sta->ifidx = DHD_BAD_IF;
1624 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1625 INIT_LIST_HEAD(&sta->list);
1626 sta->idx = ID16_INVALID; /* implying free */
1629 /** Allocate a dhd_sta object from the dhd pool. */
1631 dhd_sta_alloc(dhd_pub_t * dhdp)
1635 dhd_sta_pool_t * sta_pool;
1637 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1639 idx = id16_map_alloc(dhdp->staid_allocator);
1640 if (idx == ID16_INVALID) {
1641 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1642 return DHD_STA_NULL;
1645 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1646 sta = &sta_pool[idx];
1648 ASSERT((sta->idx == ID16_INVALID) &&
1649 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1651 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1653 sta->idx = idx; /* implying allocated */
1658 /** Delete all STAs in an interface's STA list. */
1660 dhd_if_del_sta_list(dhd_if_t *ifp)
1662 dhd_sta_t *sta, *next;
1663 unsigned long flags;
1665 DHD_IF_STA_LIST_LOCK(ifp, flags);
1667 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1668 #if defined(BCM_GMAC3)
1670 /* Remove sta from WOFA forwarder. */
1671 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1673 #endif /* BCM_GMAC3 */
1674 list_del(&sta->list);
1675 dhd_sta_free(&ifp->info->pub, sta);
1678 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1683 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1685 dhd_if_flush_sta(dhd_if_t * ifp)
1687 #if defined(BCM_GMAC3)
1689 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1690 dhd_sta_t *sta, *next;
1691 unsigned long flags;
1693 DHD_IF_STA_LIST_LOCK(ifp, flags);
1695 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1696 /* Remove any sta entry from WOFA forwarder. */
1697 fwder_flush(ifp->fwdh, (wofa_t)sta);
1700 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1702 #endif /* BCM_GMAC3 */
1705 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1707 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1709 int idx, prio, sta_pool_memsz;
1711 dhd_sta_pool_t * sta_pool;
1712 void * staid_allocator;
1714 ASSERT(dhdp != (dhd_pub_t *)NULL);
1715 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1717 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1718 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1719 if (staid_allocator == NULL) {
1720 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1724 /* Pre allocate a pool of dhd_sta objects (one extra). */
1725 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1726 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1727 if (sta_pool == NULL) {
1728 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1729 id16_map_fini(dhdp->osh, staid_allocator);
1733 dhdp->sta_pool = sta_pool;
1734 dhdp->staid_allocator = staid_allocator;
1736 /* Initialize all sta(s) for the pre-allocated free pool. */
1737 bzero((uchar *)sta_pool, sta_pool_memsz);
1738 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1739 sta = &sta_pool[idx];
1740 sta->idx = id16_map_alloc(staid_allocator);
1741 ASSERT(sta->idx <= max_sta);
1743 /* Now place them into the pre-allocated free pool. */
1744 for (idx = 1; idx <= max_sta; idx++) {
1745 sta = &sta_pool[idx];
1746 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1747 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1749 dhd_sta_free(dhdp, sta);
1755 /** Destruct the pool of dhd_sta_t objects.
1756 * Caller must ensure that no STA objects are currently associated with an if.
1759 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1761 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1765 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1766 for (idx = 1; idx <= max_sta; idx++) {
1767 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1768 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1770 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1771 dhdp->sta_pool = NULL;
1774 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1775 dhdp->staid_allocator = NULL;
1778 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1780 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1782 int idx, prio, sta_pool_memsz;
1784 dhd_sta_pool_t * sta_pool;
1785 void *staid_allocator;
1788 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1792 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1793 staid_allocator = dhdp->staid_allocator;
1796 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1800 if (!staid_allocator) {
1801 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1805 /* clear free pool */
1806 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1807 bzero((uchar *)sta_pool, sta_pool_memsz);
1809 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1810 id16_map_clear(staid_allocator, max_sta, 1);
1812 /* Initialize all sta(s) for the pre-allocated free pool. */
1813 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1814 sta = &sta_pool[idx];
1815 sta->idx = id16_map_alloc(staid_allocator);
1816 ASSERT(sta->idx <= max_sta);
1818 /* Now place them into the pre-allocated free pool. */
1819 for (idx = 1; idx <= max_sta; idx++) {
1820 sta = &sta_pool[idx];
1821 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1822 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1824 dhd_sta_free(dhdp, sta);
1828 /** Find STA with MAC address ea in an interface's STA list. */
1830 dhd_find_sta(void *pub, int ifidx, void *ea)
1834 unsigned long flags;
1837 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1839 return DHD_STA_NULL;
1841 DHD_IF_STA_LIST_LOCK(ifp, flags);
1843 list_for_each_entry(sta, &ifp->sta_list, list) {
1844 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1845 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1850 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1852 return DHD_STA_NULL;
1855 /** Add STA into the interface's STA list. */
1857 dhd_add_sta(void *pub, int ifidx, void *ea)
1861 unsigned long flags;
1864 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1866 return DHD_STA_NULL;
1868 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1869 if (sta == DHD_STA_NULL) {
1870 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1871 return DHD_STA_NULL;
1874 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1876 /* link the sta and the dhd interface */
1879 INIT_LIST_HEAD(&sta->list);
1881 DHD_IF_STA_LIST_LOCK(ifp, flags);
1883 list_add_tail(&sta->list, &ifp->sta_list);
1885 #if defined(BCM_GMAC3)
1887 ASSERT(ISALIGNED(ea, 2));
1888 /* Add sta to WOFA forwarder. */
1889 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1891 #endif /* BCM_GMAC3 */
1893 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1898 /** Delete STA from the interface's STA list. */
1900 dhd_del_sta(void *pub, int ifidx, void *ea)
1902 dhd_sta_t *sta, *next;
1904 unsigned long flags;
1907 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1911 DHD_IF_STA_LIST_LOCK(ifp, flags);
1913 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1914 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1915 #if defined(BCM_GMAC3)
1916 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1917 ASSERT(ISALIGNED(ea, 2));
1918 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1920 #endif /* BCM_GMAC3 */
1921 list_del(&sta->list);
1922 dhd_sta_free(&ifp->info->pub, sta);
1926 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1927 #ifdef DHD_L2_FILTER
1928 if (ifp->parp_enable) {
1929 /* clear Proxy ARP cache of specific Ethernet Address */
1930 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1931 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1933 #endif /* DHD_L2_FILTER */
1937 /** Add STA if it doesn't exist. Not reentrant. */
1939 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1943 sta = dhd_find_sta(pub, ifidx, ea);
1947 sta = dhd_add_sta(pub, ifidx, ea);
1953 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1954 #if !defined(BCM_GMAC3)
1955 static struct list_head *
1956 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1958 unsigned long flags;
1959 dhd_sta_t *sta, *snapshot;
1961 INIT_LIST_HEAD(snapshot_list);
1963 DHD_IF_STA_LIST_LOCK(ifp, flags);
1965 list_for_each_entry(sta, &ifp->sta_list, list) {
1966 /* allocate one and add to snapshot */
1967 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1968 if (snapshot == NULL) {
1969 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1973 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1975 INIT_LIST_HEAD(&snapshot->list);
1976 list_add_tail(&snapshot->list, snapshot_list);
1979 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1981 return snapshot_list;
1985 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1987 dhd_sta_t *sta, *next;
1989 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1990 list_del(&sta->list);
1991 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1994 #endif /* !BCM_GMAC3 */
1995 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1998 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
1999 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2000 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2001 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2002 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2003 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2004 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2005 #endif /* PCIE_FULL_DONGLE */
2010 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2012 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2013 * CPU and schedules a tasklet.
2014 * @tasklet: opaque pointer to the tasklet
2017 dhd_tasklet_schedule(void *tasklet)
2019 tasklet_schedule((struct tasklet_struct *)tasklet);
2023 * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2024 * @tasklet: tasklet to be scheduled
2025 * @on_cpu: cpu core id
2027 * If the requested cpu is online, then an IPI is sent to this cpu via the
2028 * smp_call_function_single with no wait and the tasklet_schedule function
2029 * will be invoked to schedule the specified tasklet on the requested CPU.
2032 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2035 smp_call_function_single(on_cpu,
2036 dhd_tasklet_schedule, (void *)tasklet, wait);
2038 #endif /* DHD_LB_TXC || DHD_LB_RXC */
2041 #if defined(DHD_LB_TXC)
2043 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2044 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2045 * freeing the packets placed in the tx_compl workq
2048 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2050 dhd_info_t *dhd = dhdp->info;
2051 int curr_cpu, on_cpu;
2053 if (dhd->rx_napi_netdev == NULL) {
2054 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2058 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2060 * If the destination CPU is NOT online or is same as current CPU
2061 * no need to schedule the work
2063 curr_cpu = get_cpu();
2066 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2068 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2069 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2071 schedule_work(&dhd->tx_compl_dispatcher_work);
2075 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2077 struct dhd_info *dhd =
2078 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2082 cpu = atomic_read(&dhd->tx_compl_cpu);
2083 if (!cpu_online(cpu))
2084 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2086 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2090 #endif /* DHD_LB_TXC */
2093 #if defined(DHD_LB_RXC)
2095 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2096 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2097 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2098 * placed in the rx_compl workq.
2100 * @dhdp: pointer to dhd_pub object
2103 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2105 dhd_info_t *dhd = dhdp->info;
2106 int curr_cpu, on_cpu;
2108 if (dhd->rx_napi_netdev == NULL) {
2109 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2113 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2115 * If the destination CPU is NOT online or is same as current CPU
2116 * no need to schedule the work
2118 curr_cpu = get_cpu();
2121 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2123 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2124 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2126 schedule_work(&dhd->rx_compl_dispatcher_work);
2130 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2132 struct dhd_info *dhd =
2133 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2137 cpu = atomic_read(&dhd->tx_compl_cpu);
2138 if (!cpu_online(cpu))
2139 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2141 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2145 #endif /* DHD_LB_RXC */
2148 #if defined(DHD_LB_RXP)
2150 * dhd_napi_poll - Load balance napi poll function to process received
2151 * packets and send up the network stack using netif_receive_skb()
2153 * @napi: napi object in which context this poll function is invoked
2154 * @budget: number of packets to be processed.
2156 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2157 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2158 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2159 * packet tag and sendup.
2162 dhd_napi_poll(struct napi_struct *napi, int budget)
2165 const int pkt_count = 1;
2167 struct sk_buff * skb;
2168 unsigned long flags;
2169 struct dhd_info *dhd;
2171 struct sk_buff_head rx_process_queue;
2173 dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2174 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2175 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2177 __skb_queue_head_init(&rx_process_queue);
2179 /* extract the entire rx_napi_queue into local rx_process_queue */
2180 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2181 skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2182 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2184 while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2185 OSL_PREFETCH(skb->data);
2187 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2189 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2190 __FUNCTION__, skb, ifid));
2192 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2196 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2198 DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2199 napi_complete(napi);
2205 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2206 * poll list. This function may be invoked via the smp_call_function_single
2207 * from a remote CPU.
2209 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2210 * after the napi_struct is added to the softnet data's poll_list
2212 * @info: pointer to a dhd_info struct
2215 dhd_napi_schedule(void *info)
2217 dhd_info_t *dhd = (dhd_info_t *)info;
2219 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2220 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2222 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2223 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2224 __napi_schedule(&dhd->rx_napi_struct);
2225 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2229 * If the rx_napi_struct was already running, then we let it complete
2230 * processing all its packets. The rx_napi_struct may only run on one
2231 * core at a time, to avoid out-of-order handling.
2236 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2237 * action after placing the dhd's rx_process napi object in the the remote CPU's
2238 * softnet data's poll_list.
2240 * @dhd: dhd_info which has the rx_process napi object
2241 * @on_cpu: desired remote CPU id
2244 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2246 int wait = 0; /* asynchronous IPI */
2248 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2249 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2251 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2252 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2253 __FUNCTION__, on_cpu));
2256 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2262 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2263 * Why should we do this?
2264 * The candidacy algorithm is run from the call back function
2265 * registered to CPU hotplug notifier. This call back happens from Worker
2266 * context. The dhd_napi_schedule_on is also from worker context.
2267 * Note that both of this can run on two different CPUs at the same time.
2268 * So we can possibly have a window where a given CPUn is being brought
2269 * down from CPUm while we try to run a function on CPUn.
2270 * To prevent this its better have the whole code to execute an SMP
2271 * function under get_online_cpus.
2272 * This function call ensures that hotplug mechanism does not kick-in
2273 * until we are done dealing with online CPUs
2274 * If the hotplug worker is already running, no worries because the
2275 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2277 * The below mentioned code structure is proposed in
2278 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2280 * Q: I need to ensure that a particular cpu is not removed when there is some
2281 * work specific to this cpu is in progress
2283 * According to the documentation calling get_online_cpus is NOT required, if
2284 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2285 * run from Work Queue context we have to call these functions
2287 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2289 struct dhd_info *dhd =
2290 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2294 cpu = atomic_read(&dhd->rx_napi_cpu);
2295 if (!cpu_online(cpu))
2296 dhd_napi_schedule(dhd);
2298 dhd_napi_schedule_on(dhd, cpu);
2303 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2304 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2305 * the packets enqueued into the rx_napi_queue and sendup.
2306 * The producer's rx packet queue is appended to the rx_napi_queue before
2307 * dispatching the rx_napi_struct.
2310 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2312 unsigned long flags;
2313 dhd_info_t *dhd = dhdp->info;
2317 if (dhd->rx_napi_netdev == NULL) {
2318 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2322 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2323 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2325 /* append the producer's queue of packets to the napi's rx process queue */
2326 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2327 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2328 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2331 * If the destination CPU is NOT online or is same as current CPU
2332 * no need to schedule the work
2334 curr_cpu = get_cpu();
2337 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2339 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2340 dhd_napi_schedule(dhd);
2342 schedule_work(&dhd->rx_napi_dispatcher_work);
2347 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2350 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2352 dhd_info_t *dhd = dhdp->info;
2354 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2355 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2356 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2357 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2359 #endif /* DHD_LB_RXP */
2363 static void dhd_memdump_work_handler(struct work_struct * work)
2365 struct dhd_info *dhd =
2366 container_of(work, struct dhd_info, dhd_memdump_work.work);
2370 dhd_prot_collect_memdump(&dhd->pub);
2375 /** Returns dhd iflist index corresponding the the bssidx provided by apps */
2376 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2379 dhd_info_t *dhd = dhdp->info;
2382 ASSERT(bssidx < DHD_MAX_IFS);
2385 for (i = 0; i < DHD_MAX_IFS; i++) {
2386 ifp = dhd->iflist[i];
2387 if (ifp && (ifp->bssidx == bssidx)) {
2388 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2389 ifp->name, bssidx, i));
2396 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2402 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2406 dhd_os_rxflock(dhdp);
2407 store_idx = dhdp->store_idx;
2408 sent_idx = dhdp->sent_idx;
2409 if (dhdp->skbbuf[store_idx] != NULL) {
2410 /* Make sure the previous packets are processed */
2411 dhd_os_rxfunlock(dhdp);
2412 #ifdef RXF_DEQUEUE_ON_BUSY
2413 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2414 skb, store_idx, sent_idx));
2416 #else /* RXF_DEQUEUE_ON_BUSY */
2417 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2418 skb, store_idx, sent_idx));
2419 /* removed msleep here, should use wait_event_timeout if we
2420 * want to give rx frame thread a chance to run
2422 #if defined(WAIT_DEQUEUE)
2426 #endif /* RXF_DEQUEUE_ON_BUSY */
2428 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2429 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2430 dhdp->skbbuf[store_idx] = skb;
2431 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2432 dhd_os_rxfunlock(dhdp);
2437 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2443 dhd_os_rxflock(dhdp);
2445 store_idx = dhdp->store_idx;
2446 sent_idx = dhdp->sent_idx;
2447 skb = dhdp->skbbuf[sent_idx];
2450 dhd_os_rxfunlock(dhdp);
2451 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2452 store_idx, sent_idx));
2456 dhdp->skbbuf[sent_idx] = NULL;
2457 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2459 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2462 dhd_os_rxfunlock(dhdp);
2467 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2469 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2471 if (prepost) { /* pre process */
2472 dhd_read_macaddr(dhd);
2473 } else { /* post process */
2474 dhd_write_macaddr(&dhd->pub.mac);
2480 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2482 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2484 bool _apply = FALSE;
2485 /* In case of IBSS mode, apply arp pkt filter */
2486 if (op_mode & DHD_FLAG_IBSS_MODE) {
2490 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2491 if ((dhd->arp_version == 1) &&
2492 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2500 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2502 void dhd_set_packet_filter(dhd_pub_t *dhd)
2504 #ifdef PKT_FILTER_SUPPORT
2507 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2508 if (dhd_pkt_filter_enable) {
2509 for (i = 0; i < dhd->pktfilter_count; i++) {
2510 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2513 #endif /* PKT_FILTER_SUPPORT */
2516 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2518 #ifdef PKT_FILTER_SUPPORT
2521 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2523 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2524 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2527 /* 1 - Enable packet filter, only allow unicast packet to send up */
2528 /* 0 - Disable packet filter */
2529 if (dhd_pkt_filter_enable && (!value ||
2530 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2532 for (i = 0; i < dhd->pktfilter_count; i++) {
2533 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2534 if (value && (i == DHD_ARP_FILTER_NUM) &&
2535 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2536 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2537 "val %d, cnt %d, op_mode 0x%x\n",
2538 value, i, dhd->op_mode));
2541 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2542 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2543 value, dhd_master_mode);
2546 #endif /* PKT_FILTER_SUPPORT */
2549 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2551 #ifndef SUPPORT_PM2_ONLY
2552 int power_mode = PM_MAX;
2553 #endif /* SUPPORT_PM2_ONLY */
2554 #ifdef SUPPORT_SENSORHUB
2556 #endif /* SUPPORT_SENSORHUB */
2557 /* wl_pkt_filter_enable_t enable_parm; */
2559 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2560 #ifdef DHD_USE_EARLYSUSPEND
2561 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2562 int bcn_timeout = 0;
2563 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2564 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2565 int roam_time_thresh = 0; /* (ms) */
2566 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2567 #ifndef ENABLE_FW_ROAM_SUSPEND
2568 uint roamvar = dhd->conf->roam_off_suspend;
2569 #endif /* ENABLE_FW_ROAM_SUSPEND */
2570 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2572 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2573 uint nd_ra_filter = 0;
2575 #endif /* DHD_USE_EARLYSUSPEND */
2576 #ifdef PASS_ALL_MCAST_PKTS
2577 struct dhd_info *dhdinfo;
2580 #endif /* PASS_ALL_MCAST_PKTS */
2581 #ifdef DYNAMIC_SWOOB_DURATION
2582 #ifndef CUSTOM_INTR_WIDTH
2583 #define CUSTOM_INTR_WIDTH 100
2585 #endif /* CUSTOM_INTR_WIDTH */
2586 #endif /* DYNAMIC_SWOOB_DURATION */
2591 #ifdef PASS_ALL_MCAST_PKTS
2592 dhdinfo = dhd->info;
2593 #endif /* PASS_ALL_MCAST_PKTS */
2595 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2596 __FUNCTION__, value, dhd->in_suspend));
2598 dhd_suspend_lock(dhd);
2600 #ifdef CUSTOM_SET_CPUCORE
2601 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2602 /* set specific cpucore */
2603 dhd_set_cpucore(dhd, TRUE);
2604 #endif /* CUSTOM_SET_CPUCORE */
2605 #ifndef SUPPORT_PM2_ONLY
2606 if (dhd->conf->pm >= 0)
2607 power_mode = dhd->conf->pm;
2608 #endif /* SUPPORT_PM2_ONLY */
2610 if (value && dhd->in_suspend) {
2611 #ifdef PKT_FILTER_SUPPORT
2612 dhd->early_suspended = 1;
2614 /* Kernel suspended */
2615 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
2617 #ifdef SUPPORT_SENSORHUB
2619 if (dhd->info->shub_enable == 1) {
2620 bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
2621 iovbuf, sizeof(iovbuf));
2622 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2623 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2624 DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
2625 __FUNCTION__, ret));
2628 #endif /* SUPPORT_SENSORHUB */
2630 #ifndef SUPPORT_PM2_ONLY
2631 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2632 sizeof(power_mode), TRUE, 0);
2633 #endif /* SUPPORT_PM2_ONLY */
2635 #ifdef PKT_FILTER_SUPPORT
2636 /* Enable packet filter,
2637 * only allow unicast packet to send up
2639 dhd_enable_packet_filter(1, dhd);
2640 #endif /* PKT_FILTER_SUPPORT */
2642 #ifdef PASS_ALL_MCAST_PKTS
2644 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2645 iovbuf, sizeof(iovbuf));
2646 for (i = 0; i < DHD_MAX_IFS; i++) {
2647 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2648 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2649 sizeof(iovbuf), TRUE, i);
2651 #endif /* PASS_ALL_MCAST_PKTS */
2653 /* If DTIM skip is set up as default, force it to wake
2654 * each third DTIM for better power savings. Note that
2655 * one side effect is a chance to miss BC/MC packet.
2658 /* Do not set bcn_li_ditm on WFD mode */
2659 if (dhd->tdls_mode) {
2663 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2664 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2665 4, iovbuf, sizeof(iovbuf));
2666 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
2668 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2670 #ifdef DHD_USE_EARLYSUSPEND
2671 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2672 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2673 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2674 4, iovbuf, sizeof(iovbuf));
2675 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2676 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2677 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2678 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2679 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2680 4, iovbuf, sizeof(iovbuf));
2681 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2682 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2683 #ifndef ENABLE_FW_ROAM_SUSPEND
2684 /* Disable firmware roaming during suspend */
2685 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2686 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2687 #endif /* ENABLE_FW_ROAM_SUSPEND */
2688 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2690 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2691 4, iovbuf, sizeof(iovbuf));
2692 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2693 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2694 if (FW_SUPPORTED(dhd, ndoe)) {
2695 /* enable IPv6 RA filter in firmware during suspend */
2697 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2698 iovbuf, sizeof(iovbuf));
2699 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2700 sizeof(iovbuf), TRUE, 0)) < 0)
2701 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2704 #ifdef DYNAMIC_SWOOB_DURATION
2705 intr_width = CUSTOM_INTR_WIDTH;
2706 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2707 iovbuf, sizeof(iovbuf));
2708 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2709 sizeof(iovbuf), TRUE, 0)) < 0) {
2710 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2712 #endif /* DYNAMIC_SWOOB_DURATION */
2713 #endif /* DHD_USE_EARLYSUSPEND */
2715 #ifdef PKT_FILTER_SUPPORT
2716 dhd->early_suspended = 0;
2718 /* Kernel resumed */
2719 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
2721 #ifdef SUPPORT_SENSORHUB
2723 if (dhd->info->shub_enable == 1) {
2724 bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
2725 4, iovbuf, sizeof(iovbuf));
2726 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2727 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2728 DHD_ERROR(("%s Sensor Hub move/stop stop:"
2729 "failed %d\n", __FUNCTION__, ret));
2732 #endif /* SUPPORT_SENSORHUB */
2735 #ifdef DYNAMIC_SWOOB_DURATION
2737 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2738 iovbuf, sizeof(iovbuf));
2739 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2740 sizeof(iovbuf), TRUE, 0)) < 0) {
2741 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2743 #endif /* DYNAMIC_SWOOB_DURATION */
2744 #ifndef SUPPORT_PM2_ONLY
2745 power_mode = PM_FAST;
2746 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2747 sizeof(power_mode), TRUE, 0);
2748 #endif /* SUPPORT_PM2_ONLY */
2749 #ifdef PKT_FILTER_SUPPORT
2750 /* disable pkt filter */
2751 dhd_enable_packet_filter(0, dhd);
2752 #endif /* PKT_FILTER_SUPPORT */
2753 #ifdef PASS_ALL_MCAST_PKTS
2755 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2756 iovbuf, sizeof(iovbuf));
2757 for (i = 0; i < DHD_MAX_IFS; i++) {
2758 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2759 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2760 sizeof(iovbuf), TRUE, i);
2762 #endif /* PASS_ALL_MCAST_PKTS */
2764 /* restore pre-suspend setting for dtim_skip */
2765 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2766 4, iovbuf, sizeof(iovbuf));
2768 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2769 #ifdef DHD_USE_EARLYSUSPEND
2770 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2771 bcn_timeout = CUSTOM_BCN_TIMEOUT;
2772 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2773 4, iovbuf, sizeof(iovbuf));
2774 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2775 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2776 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2777 roam_time_thresh = 2000;
2778 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2779 4, iovbuf, sizeof(iovbuf));
2780 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2781 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2782 #ifndef ENABLE_FW_ROAM_SUSPEND
2783 roamvar = dhd_roam_disable;
2784 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2785 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2786 #endif /* ENABLE_FW_ROAM_SUSPEND */
2787 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2789 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2790 4, iovbuf, sizeof(iovbuf));
2791 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2792 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2793 if (FW_SUPPORTED(dhd, ndoe)) {
2794 /* disable IPv6 RA filter in firmware during suspend */
2796 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2797 iovbuf, sizeof(iovbuf));
2798 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2799 sizeof(iovbuf), TRUE, 0)) < 0)
2800 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2803 #endif /* DHD_USE_EARLYSUSPEND */
2806 dhd_suspend_unlock(dhd);
2811 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2813 dhd_pub_t *dhdp = &dhd->pub;
2816 DHD_OS_WAKE_LOCK(dhdp);
2817 DHD_PERIM_LOCK(dhdp);
2819 /* Set flag when early suspend was called */
2820 dhdp->in_suspend = val;
2821 if ((force || !dhdp->suspend_disable_flag) &&
2822 dhd_support_sta_mode(dhdp))
2824 ret = dhd_set_suspend(val, dhdp);
2827 DHD_PERIM_UNLOCK(dhdp);
2828 DHD_OS_WAKE_UNLOCK(dhdp);
2832 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2833 static void dhd_early_suspend(struct early_suspend *h)
2835 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2836 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2839 dhd_suspend_resume_helper(dhd, 1, 0);
2842 static void dhd_late_resume(struct early_suspend *h)
2844 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2845 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2848 dhd_suspend_resume_helper(dhd, 0, 0);
2850 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2853 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2854 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2856 * dhd_timeout_start(&tmo, usec);
2857 * while (!dhd_timeout_expired(&tmo))
2858 * if (poll_something())
2860 * if (dhd_timeout_expired(&tmo))
2865 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2870 tmo->tick = jiffies_to_usecs(1);
2874 dhd_timeout_expired(dhd_timeout_t *tmo)
2876 /* Does nothing the first call */
2877 if (tmo->increment == 0) {
2882 if (tmo->elapsed >= tmo->limit)
2885 /* Add the delay that's about to take place */
2886 tmo->elapsed += tmo->increment;
2888 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2889 OSL_DELAY(tmo->increment);
2890 tmo->increment *= 2;
2891 if (tmo->increment > tmo->tick)
2892 tmo->increment = tmo->tick;
2894 wait_queue_head_t delay_wait;
2895 DECLARE_WAITQUEUE(wait, current);
2896 init_waitqueue_head(&delay_wait);
2897 add_wait_queue(&delay_wait, &wait);
2898 set_current_state(TASK_INTERRUPTIBLE);
2899 (void)schedule_timeout(1);
2900 remove_wait_queue(&delay_wait, &wait);
2901 set_current_state(TASK_RUNNING);
2908 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2913 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2917 while (i < DHD_MAX_IFS) {
2918 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2926 struct net_device * dhd_idx2net(void *pub, int ifidx)
2928 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2929 struct dhd_info *dhd_info;
2931 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2933 dhd_info = dhd_pub->info;
2934 if (dhd_info && dhd_info->iflist[ifidx])
2935 return dhd_info->iflist[ifidx]->net;
2940 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2942 int i = DHD_MAX_IFS;
2946 if (name == NULL || *name == '\0')
2950 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2953 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2955 return i; /* default - the primary interface */
2959 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2961 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2965 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2966 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2970 if (dhd->iflist[ifidx] == NULL) {
2971 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2975 if (dhd->iflist[ifidx]->net)
2976 return dhd->iflist[ifidx]->net->name;
2982 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2985 dhd_info_t *dhd = (dhd_info_t *)dhdp;
2988 for (i = 0; i < DHD_MAX_IFS; i++)
2989 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2990 return dhd->iflist[i]->mac_addr;
2997 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
2999 struct net_device *dev;
3000 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3001 struct netdev_hw_addr *ha;
3003 struct dev_mc_list *mclist;
3005 uint32 allmulti, cnt;
3012 if (!dhd->iflist[ifidx]) {
3013 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3016 dev = dhd->iflist[ifidx]->net;
3019 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3020 netif_addr_lock_bh(dev);
3021 #endif /* LINUX >= 2.6.27 */
3022 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3023 cnt = netdev_mc_count(dev);
3025 cnt = dev->mc_count;
3026 #endif /* LINUX >= 2.6.35 */
3027 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3028 netif_addr_unlock_bh(dev);
3029 #endif /* LINUX >= 2.6.27 */
3031 /* Determine initial value of allmulti flag */
3032 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3034 #ifdef PASS_ALL_MCAST_PKTS
3035 #ifdef PKT_FILTER_SUPPORT
3036 if (!dhd->pub.early_suspended)
3037 #endif /* PKT_FILTER_SUPPORT */
3039 #endif /* PASS_ALL_MCAST_PKTS */
3041 /* Send down the multicast list first. */
3044 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3045 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3046 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3047 dhd_ifname(&dhd->pub, ifidx), cnt));
3051 strncpy(bufp, "mcast_list", buflen - 1);
3052 bufp[buflen - 1] = '\0';
3053 bufp += strlen("mcast_list") + 1;
3056 memcpy(bufp, &cnt, sizeof(cnt));
3057 bufp += sizeof(cnt);
3059 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3060 netif_addr_lock_bh(dev);
3061 #endif /* LINUX >= 2.6.27 */
3062 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3063 netdev_for_each_mc_addr(ha, dev) {
3066 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3067 bufp += ETHER_ADDR_LEN;
3070 #else /* LINUX < 2.6.35 */
3071 for (mclist = dev->mc_list; (mclist && (cnt > 0));
3072 cnt--, mclist = mclist->next) {
3073 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3074 bufp += ETHER_ADDR_LEN;
3076 #endif /* LINUX >= 2.6.35 */
3077 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3078 netif_addr_unlock_bh(dev);
3079 #endif /* LINUX >= 2.6.27 */
3081 memset(&ioc, 0, sizeof(ioc));
3082 ioc.cmd = WLC_SET_VAR;
3087 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3089 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3090 dhd_ifname(&dhd->pub, ifidx), cnt));
3091 allmulti = cnt ? TRUE : allmulti;
3094 MFREE(dhd->pub.osh, buf, buflen);
3096 /* Now send the allmulti setting. This is based on the setting in the
3097 * net_device flags, but might be modified above to be turned on if we
3098 * were trying to set some addresses and dongle rejected it...
3101 buflen = sizeof("allmulti") + sizeof(allmulti);
3102 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
3103 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
3106 allmulti = htol32(allmulti);
3108 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
3109 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
3110 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
3111 MFREE(dhd->pub.osh, buf, buflen);
3116 memset(&ioc, 0, sizeof(ioc));
3117 ioc.cmd = WLC_SET_VAR;
3122 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3124 DHD_ERROR(("%s: set allmulti %d failed\n",
3125 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3128 MFREE(dhd->pub.osh, buf, buflen);
3130 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3132 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3134 allmulti = htol32(allmulti);
3136 memset(&ioc, 0, sizeof(ioc));
3137 ioc.cmd = WLC_SET_PROMISC;
3138 ioc.buf = &allmulti;
3139 ioc.len = sizeof(allmulti);
3142 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3144 DHD_ERROR(("%s: set promisc %d failed\n",
3145 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3150 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3156 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3157 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3160 memset(&ioc, 0, sizeof(ioc));
3161 ioc.cmd = WLC_SET_VAR;
3166 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3168 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3170 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3172 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3179 extern struct net_device *ap_net_dev;
3180 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
3184 /* Get psta/psr configuration configuration */
3185 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3187 dhd_info_t *dhd = dhdp->info;
3188 return (int)dhd->psta_mode;
3190 /* Set psta/psr configuration configuration */
3191 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3193 dhd_info_t *dhd = dhdp->info;
3194 dhd->psta_mode = val;
3197 #endif /* DHD_PSTA */
3200 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3202 dhd_info_t *dhd = handle;
3203 dhd_if_event_t *if_event = event_info;
3204 struct net_device *ndev;
3207 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3208 struct wireless_dev *vwdev, *primary_wdev;
3209 struct net_device *primary_ndev;
3210 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3212 if (event != DHD_WQ_WORK_IF_ADD) {
3213 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3218 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3223 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3227 dhd_net_if_lock_local(dhd);
3228 DHD_OS_WAKE_LOCK(&dhd->pub);
3229 DHD_PERIM_LOCK(&dhd->pub);
3231 ifidx = if_event->event.ifidx;
3232 bssidx = if_event->event.bssidx;
3233 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3235 /* This path is for non-android case */
3236 /* The interface name in host and in event msg are same */
3237 /* if name in event msg is used to create dongle if list on host */
3238 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3239 if_event->mac, bssidx, TRUE, if_event->name);
3241 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
3245 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3246 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
3247 if (unlikely(!vwdev)) {
3248 DHD_ERROR(("Could not allocate wireless device\n"));
3251 primary_ndev = dhd->pub.info->iflist[0]->net;
3252 primary_wdev = ndev_to_wdev(primary_ndev);
3253 vwdev->wiphy = primary_wdev->wiphy;
3254 vwdev->iftype = if_event->event.role;
3255 vwdev->netdev = ndev;
3256 ndev->ieee80211_ptr = vwdev;
3257 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
3258 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
3259 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3261 DHD_PERIM_UNLOCK(&dhd->pub);
3262 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3263 DHD_PERIM_LOCK(&dhd->pub);
3264 if (ret != BCME_OK) {
3265 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3266 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3269 #ifdef PCIE_FULL_DONGLE
3270 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3271 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3272 char iovbuf[WLC_IOCTL_SMLEN];
3275 memset(iovbuf, 0, sizeof(iovbuf));
3276 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
3277 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
3279 if (ret != BCME_OK) {
3280 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3281 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3284 #endif /* PCIE_FULL_DONGLE */
3287 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3289 DHD_PERIM_UNLOCK(&dhd->pub);
3290 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3291 dhd_net_if_unlock_local(dhd);
3295 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3297 dhd_info_t *dhd = handle;
3299 dhd_if_event_t *if_event = event_info;
3302 if (event != DHD_WQ_WORK_IF_DEL) {
3303 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3308 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3313 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3317 dhd_net_if_lock_local(dhd);
3318 DHD_OS_WAKE_LOCK(&dhd->pub);
3319 DHD_PERIM_LOCK(&dhd->pub);
3321 ifidx = if_event->event.ifidx;
3322 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3324 DHD_PERIM_UNLOCK(&dhd->pub);
3325 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3326 DHD_PERIM_LOCK(&dhd->pub);
3328 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3330 DHD_PERIM_UNLOCK(&dhd->pub);
3331 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3332 dhd_net_if_unlock_local(dhd);
3336 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3338 dhd_info_t *dhd = handle;
3339 dhd_if_t *ifp = event_info;
3341 if (event != DHD_WQ_WORK_SET_MAC) {
3342 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3346 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3350 dhd_net_if_lock_local(dhd);
3351 DHD_OS_WAKE_LOCK(&dhd->pub);
3352 DHD_PERIM_LOCK(&dhd->pub);
3356 unsigned long flags;
3358 DHD_GENERAL_LOCK(&dhd->pub, flags);
3359 in_ap = (ap_net_dev != NULL);
3360 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3363 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3370 if (ifp == NULL || !dhd->pub.up) {
3371 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3375 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3376 ifp->set_macaddress = FALSE;
3377 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3378 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3380 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3383 DHD_PERIM_UNLOCK(&dhd->pub);
3384 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3385 dhd_net_if_unlock_local(dhd);
3389 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3391 dhd_info_t *dhd = handle;
3392 dhd_if_t *ifp = event_info;
3395 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3396 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3401 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3405 dhd_net_if_lock_local(dhd);
3406 DHD_OS_WAKE_LOCK(&dhd->pub);
3407 DHD_PERIM_LOCK(&dhd->pub);
3412 unsigned long flags;
3413 DHD_GENERAL_LOCK(&dhd->pub, flags);
3414 in_ap = (ap_net_dev != NULL);
3415 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3418 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3420 ifp->set_multicast = FALSE;
3426 if (ifp == NULL || !dhd->pub.up) {
3427 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3434 _dhd_set_multicast_list(dhd, ifidx);
3435 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3438 DHD_PERIM_UNLOCK(&dhd->pub);
3439 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3440 dhd_net_if_unlock_local(dhd);
3444 dhd_set_mac_address(struct net_device *dev, void *addr)
3448 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3449 struct sockaddr *sa = (struct sockaddr *)addr;
3453 ifidx = dhd_net2idx(dhd, dev);
3454 if (ifidx == DHD_BAD_IF)
3457 dhdif = dhd->iflist[ifidx];
3459 dhd_net_if_lock_local(dhd);
3460 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3461 dhdif->set_macaddress = TRUE;
3462 dhd_net_if_unlock_local(dhd);
3463 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3464 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3469 dhd_set_multicast_list(struct net_device *dev)
3471 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3474 ifidx = dhd_net2idx(dhd, dev);
3475 if (ifidx == DHD_BAD_IF)
3478 dhd->iflist[ifidx]->set_multicast = TRUE;
3479 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3480 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3483 #ifdef PROP_TXSTATUS
3485 dhd_os_wlfc_block(dhd_pub_t *pub)
3487 dhd_info_t *di = (dhd_info_t *)(pub->info);
3489 spin_lock_bh(&di->wlfc_spinlock);
3494 dhd_os_wlfc_unblock(dhd_pub_t *pub)
3496 dhd_info_t *di = (dhd_info_t *)(pub->info);
3499 spin_unlock_bh(&di->wlfc_spinlock);
3503 #endif /* PROP_TXSTATUS */
3505 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
3511 static const PKTTYPE_INFO packet_type_info[] =
3513 { ETHER_TYPE_IP, "IP" },
3514 { ETHER_TYPE_ARP, "ARP" },
3515 { ETHER_TYPE_BRCM, "BRCM" },
3516 { ETHER_TYPE_802_1X, "802.1X" },
3517 { ETHER_TYPE_WAI, "WAPI" },
3521 static const char *_get_packet_type_str(uint16 type)
3524 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
3526 for (i = 0; i < n; i++) {
3527 if (packet_type_info[i].type == type)
3528 return packet_type_info[i].str;
3531 return packet_type_info[n].str;
3533 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3535 #if defined(DHD_TX_DUMP)
3537 dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
3543 dump_data = PKTDATA(osh, pkt);
3544 protocol = (dump_data[12] << 8) | dump_data[13];
3545 ifname = ndev ? ndev->name : "N/A";
3547 DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
3549 if (protocol == ETHER_TYPE_802_1X) {
3550 dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
3553 #if defined(DHD_TX_FULL_DUMP)
3557 datalen = PKTLEN(osh, pkt);
3559 for (i = 0; i < datalen; i++) {
3560 printk("%02X ", dump_data[i]);
3566 #endif /* DHD_TX_FULL_DUMP */
3568 #endif /* DHD_TX_DUMP */
3570 /* This routine do not support Packet chain feature, Currently tested for
3573 int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
3575 struct sk_buff *skb;
3576 void *skbhead = NULL;
3577 void *skbprev = NULL;
3579 ASSERT(!PKTISCHAINED(p));
3580 skb = PKTTONATIVE(dhdp->osh, p);
3582 ifp = dhdp->info->iflist[ifidx];
3583 skb->dev = ifp->net;
3584 #if defined(BCM_GMAC3)
3585 /* Forwarder capable interfaces use WOFA based forwarding */
3587 struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
3588 uint16 * da = (uint16 *)(eh->ether_dhost);
3590 ASSERT(ISALIGNED(da, 2));
3592 wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
3593 if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
3594 if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
3598 PKTFRMNATIVE(dhdp->osh, p);
3599 PKTFREE(dhdp->osh, p, FALSE);
3602 #endif /* BCM_GMAC3 */
3604 skb->protocol = eth_type_trans(skb, skb->dev);
3606 if (in_interrupt()) {
3607 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3608 __FUNCTION__, __LINE__);
3611 if (dhdp->info->rxthread_enabled) {
3615 PKTSETNEXT(dhdp->osh, skbprev, skb);
3619 /* If the receive is not processed inside an ISR,
3620 * the softirqd must be woken explicitly to service
3621 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3622 * by netif_rx_ni(), but in earlier kernels, we need
3623 * to do it manually.
3625 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3626 __FUNCTION__, __LINE__);
3627 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3632 local_irq_save(flags);
3634 local_irq_restore(flags);
3635 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3639 if (dhdp->info->rxthread_enabled && skbhead)
3640 dhd_sched_rxf(dhdp, skbhead);
3646 __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3649 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3650 struct ether_header *eh = NULL;
3651 #ifdef DHD_L2_FILTER
3652 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
3654 #ifdef DHD_8021X_DUMP
3655 struct net_device *ndev;
3656 #endif /* DHD_8021X_DUMP */
3658 /* Reject if down */
3659 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3660 /* free the packet here since the caller won't */
3661 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3665 #ifdef PCIE_FULL_DONGLE
3666 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3667 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3668 PKTFREE(dhdp->osh, pktbuf, TRUE);
3671 #endif /* PCIE_FULL_DONGLE */
3673 #ifdef DHD_L2_FILTER
3674 /* if dhcp_unicast is enabled, we need to convert the */
3675 /* broadcast DHCP ACK/REPLY packets to Unicast. */
3676 if (ifp->dhcp_unicast) {
3678 uint8* ehptr = NULL;
3680 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3681 if (ret == BCME_OK) {
3682 /* if given mac address having valid entry in sta list
3683 * copy the given mac address, and return with BCME_OK
3685 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
3686 ehptr = PKTDATA(dhdp->osh, pktbuf);
3687 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
3692 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3693 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3694 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3699 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3700 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
3702 /* Drop the packets if l2 filter has processed it already
3703 * otherwise continue with the normal path
3705 if (ret == BCME_OK) {
3706 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3710 #endif /* DHD_L2_FILTER */
3711 /* Update multicast statistic */
3712 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3713 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3714 eh = (struct ether_header *)pktdata;
3716 if (ETHER_ISMULTI(eh->ether_dhost))
3717 dhdp->tx_multicast++;
3718 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
3719 atomic_inc(&dhd->pend_8021x_cnt);
3720 #ifdef DHD_DHCP_DUMP
3721 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
3725 uint16 udp_port_pos;
3726 uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
3727 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
3728 struct net_device *net;
3731 net = dhd_idx2net(dhdp, ifidx);
3732 ifname = net ? net->name : "N/A";
3733 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
3734 source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
3735 dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
3736 if (source_port == 0x0044 || dest_port == 0x0044) {
3737 dump_hex = (pktdata[udp_port_pos+249] << 8) |
3738 pktdata[udp_port_pos+250];
3739 if (dump_hex == 0x0101) {
3740 DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
3741 } else if (dump_hex == 0x0102) {
3742 DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
3743 } else if (dump_hex == 0x0103) {
3744 DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
3745 } else if (dump_hex == 0x0105) {
3746 DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
3748 DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
3750 #ifdef DHD_LOSSLESS_ROAMING
3751 if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
3752 DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
3754 #endif /* DHD_LOSSLESS_ROAMING */
3756 } else if (source_port == 0x0043 || dest_port == 0x0043) {
3757 DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
3760 #endif /* DHD_DHCP_DUMP */
3762 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3766 /* Look into the packet and update the packet priority */
3767 #ifndef PKTPRIO_OVERRIDE
3768 if (PKTPRIO(pktbuf) == 0)
3769 #endif /* !PKTPRIO_OVERRIDE */
3772 pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
3774 pktsetprio(pktbuf, FALSE);
3775 #endif /* QOS_MAP_SET */
3779 #ifdef PCIE_FULL_DONGLE
3781 * Lkup the per interface hash table, for a matching flowring. If one is not
3782 * available, allocate a unique flowid and add a flowring entry.
3783 * The found or newly created flowid is placed into the pktbuf's tag.
3785 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
3786 if (ret != BCME_OK) {
3787 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3792 #if defined(DHD_TX_DUMP)
3793 ndev = dhd_idx2net(dhdp, ifidx);
3794 dhd_tx_dump(ndev, dhdp->osh, pktbuf);
3796 /* terence 20150901: Micky add to ajust the 802.1X priority */
3797 /* Set the 802.1X packet with the highest priority 7 */
3798 if (dhdp->conf->pktprio8021x >= 0)
3799 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
3801 #ifdef PROP_TXSTATUS
3802 if (dhd_wlfc_is_supported(dhdp)) {
3803 /* store the interface ID */
3804 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
3806 /* store destination MAC in the tag as well */
3807 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3809 /* decide which FIFO this packet belongs to */
3810 if (ETHER_ISMULTI(eh->ether_dhost))
3811 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3812 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
3814 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
3816 #endif /* PROP_TXSTATUS */
3818 /* If the protocol uses a data header, apply it */
3819 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
3822 /* Use bus module to send data frame */
3824 dhd_htsf_addtxts(dhdp, pktbuf);
3826 #ifdef PROP_TXSTATUS
3828 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
3829 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3830 /* non-proptxstatus way */
3832 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3834 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3835 #endif /* BCMPCIE */
3840 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3842 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3843 #endif /* BCMPCIE */
3844 #endif /* PROP_TXSTATUS */
3850 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3853 unsigned long flags;
3855 DHD_GENERAL_LOCK(dhdp, flags);
3856 if (dhdp->busstate == DHD_BUS_DOWN ||
3857 dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3858 DHD_ERROR(("%s: returning as busstate=%d\n",
3859 __FUNCTION__, dhdp->busstate));
3860 DHD_GENERAL_UNLOCK(dhdp, flags);
3861 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3864 dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
3865 DHD_GENERAL_UNLOCK(dhdp, flags);
3867 #ifdef DHD_PCIE_RUNTIMEPM
3868 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
3869 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3870 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3874 #endif /* DHD_PCIE_RUNTIMEPM */
3876 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3878 #ifdef DHD_PCIE_RUNTIMEPM
3881 DHD_GENERAL_LOCK(dhdp, flags);
3882 dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
3883 DHD_GENERAL_UNLOCK(dhdp, flags);
3888 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3893 dhd_info_t *dhd = DHD_DEV_INFO(net);
3894 dhd_if_t *ifp = NULL;
3896 unsigned long flags;
3898 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3900 uint8 htsfdlystat_sz = 0;
3903 struct ether_header *eh;
3905 #endif /* DHD_WMF */
3907 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3910 #ifdef PCIE_FULL_DONGLE
3911 DHD_GENERAL_LOCK(&dhd->pub, flags);
3912 dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3913 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3914 #endif /* PCIE_FULL_DONGLE */
3916 #ifdef DHD_PCIE_RUNTIMEPM
3917 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3918 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
3919 /* stop the network queue temporarily until resume done */
3920 DHD_GENERAL_LOCK(&dhd->pub, flags);
3921 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3922 dhd_bus_stop_queue(dhd->pub.bus);
3924 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3925 dhd_os_busbusy_wake(&dhd->pub);
3926 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3927 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3930 return NETDEV_TX_BUSY;
3933 #endif /* DHD_PCIE_RUNTIMEPM */
3935 DHD_GENERAL_LOCK(&dhd->pub, flags);
3936 #ifdef PCIE_FULL_DONGLE
3937 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3938 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3939 dhd_os_busbusy_wake(&dhd->pub);
3940 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3941 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3944 return NETDEV_TX_BUSY;
3947 #endif /* PCIE_FULL_DONGLE */
3949 DHD_OS_WAKE_LOCK(&dhd->pub);
3950 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3952 /* Reject if down */
3953 if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3954 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3955 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3956 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3957 netif_stop_queue(net);
3958 /* Send Event when bus down detected during data session */
3959 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3960 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3961 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3962 net_os_send_hang_message(net);
3964 #ifdef PCIE_FULL_DONGLE
3965 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3966 dhd_os_busbusy_wake(&dhd->pub);
3967 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3968 #endif /* PCIE_FULL_DONGLE */
3969 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3970 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3971 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3974 return NETDEV_TX_BUSY;
3978 ifp = DHD_DEV_IFP(net);
3979 ifidx = DHD_DEV_IFIDX(net);
3980 BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
3982 if (ifidx == DHD_BAD_IF) {
3983 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3984 netif_stop_queue(net);
3985 #ifdef PCIE_FULL_DONGLE
3986 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3987 dhd_os_busbusy_wake(&dhd->pub);
3988 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3989 #endif /* PCIE_FULL_DONGLE */
3990 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3991 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3992 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3995 return NETDEV_TX_BUSY;
3998 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4000 ASSERT(ifidx == dhd_net2idx(dhd, net));
4001 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4003 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4005 /* re-align socket buffer if "skb->data" is odd address */
4006 if (((unsigned long)(skb->data)) & 0x1) {
4007 unsigned char *data = skb->data;
4008 uint32 length = skb->len;
4009 PKTPUSH(dhd->pub.osh, skb, 1);
4010 memmove(skb->data, data, length);
4011 PKTSETLEN(dhd->pub.osh, skb, length);
4014 datalen = PKTLEN(dhd->pub.osh, skb);
4016 /* Make sure there's enough room for any header */
4017 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4018 struct sk_buff *skb2;
4020 DHD_INFO(("%s: insufficient headroom\n",
4021 dhd_ifname(&dhd->pub, ifidx)));
4022 dhd->pub.tx_realloc++;
4024 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4025 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4028 if ((skb = skb2) == NULL) {
4029 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4030 dhd_ifname(&dhd->pub, ifidx)));
4034 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4037 /* Convert to packet */
4038 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4039 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4040 dhd_ifname(&dhd->pub, ifidx)));
4041 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4042 dev_kfree_skb_any(skb);
4047 #if defined(WLMEDIA_HTSF)
4048 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4049 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4050 struct ether_header *eh = (struct ether_header *)pktdata;
4052 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4053 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4054 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4060 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4061 iph = (uint8 *)eh + ETHER_HDR_LEN;
4063 /* WMF processing for multicast packets
4064 * Only IPv4 packets are handled
4066 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4067 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4068 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4069 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4071 bool ucast_convert = FALSE;
4072 #ifdef DHD_UCAST_UPNP
4075 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4076 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4077 #endif /* DHD_UCAST_UPNP */
4078 #ifdef DHD_IGMP_UCQUERY
4079 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4080 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4081 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4082 #endif /* DHD_IGMP_UCQUERY */
4083 if (ucast_convert) {
4085 #ifdef PCIE_FULL_DONGLE
4086 unsigned long flags;
4088 struct list_head snapshot_list;
4089 struct list_head *wmf_ucforward_list;
4093 /* For non BCM_GMAC3 platform we need a snapshot sta_list to
4094 * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
4096 wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4098 /* Convert upnp/igmp query to unicast for each assoc STA */
4099 list_for_each_entry(sta, wmf_ucforward_list, list) {
4100 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4104 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4106 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4108 #ifdef PCIE_FULL_DONGLE
4109 DHD_GENERAL_LOCK(&dhd->pub, flags);
4110 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4111 dhd_os_busbusy_wake(&dhd->pub);
4112 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4113 #endif /* PCIE_FULL_DONGLE */
4114 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4115 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4117 if (ret == NETDEV_TX_OK)
4118 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4122 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4124 /* There will be no STA info if the packet is coming from LAN host
4127 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4131 /* Either taken by WMF or we should drop it.
4134 #ifdef PCIE_FULL_DONGLE
4135 DHD_GENERAL_LOCK(&dhd->pub, flags);
4136 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4137 dhd_os_busbusy_wake(&dhd->pub);
4138 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4139 #endif /* PCIE_FULL_DONGLE */
4140 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4141 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4142 return NETDEV_TX_OK;
4144 /* Continue the transmit path */
4149 #endif /* DHD_WMF */
4151 /* PSR related packet proto manipulation should be done in DHD
4152 * since dongle doesn't have complete payload
4154 if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4155 ifidx, &pktbuf, TRUE) < 0)) {
4156 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4157 dhd_ifname(&dhd->pub, ifidx)));
4159 #endif /* DHD_PSTA */
4161 #ifdef DHDTCPACK_SUPPRESS
4162 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4163 /* If this packet has been hold or got freed, just return */
4164 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4169 /* If this packet has replaced another packet and got freed, just return */
4170 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4175 #endif /* DHDTCPACK_SUPPRESS */
4177 /* no segmented SKB support (Kernel-3.18.y) */
4178 if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4179 PKTSETLINK(skb, NULL);
4182 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4186 ifp->stats.tx_dropped++;
4187 dhd->pub.tx_dropped++;
4190 #ifdef PROP_TXSTATUS
4191 /* tx_packets counter can counted only when wlfc is disabled */
4192 if (!dhd_wlfc_is_supported(&dhd->pub))
4195 dhd->pub.tx_packets++;
4196 ifp->stats.tx_packets++;
4197 ifp->stats.tx_bytes += datalen;
4201 #ifdef PCIE_FULL_DONGLE
4202 DHD_GENERAL_LOCK(&dhd->pub, flags);
4203 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4204 dhd_os_busbusy_wake(&dhd->pub);
4205 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4206 #endif /* PCIE_FULL_DONGLE */
4208 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4209 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4210 BUZZZ_LOG(START_XMIT_END, 0);
4212 /* Return ok: we always eat the packet */
4213 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4216 return NETDEV_TX_OK;
4222 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4224 struct net_device *net;
4225 dhd_info_t *dhd = dhdp->info;
4228 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4232 #ifdef DHD_LOSSLESS_ROAMING
4233 /* block flowcontrol during roaming */
4234 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4239 if (ifidx == ALL_INTERFACES) {
4240 /* Flow control on all active interfaces */
4241 dhdp->txoff = state;
4242 for (i = 0; i < DHD_MAX_IFS; i++) {
4243 if (dhd->iflist[i]) {
4244 net = dhd->iflist[i]->net;
4246 netif_stop_queue(net);
4248 netif_wake_queue(net);
4252 if (dhd->iflist[ifidx]) {
4253 net = dhd->iflist[ifidx]->net;
4255 netif_stop_queue(net);
4257 netif_wake_queue(net);
4265 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
4267 dhd_info_t *dhd = dhdp->info;
4269 return dhd->rxthread_enabled;
4271 #endif /* DHD_WMF */
4273 /** Called when a frame is received by the dongle on interface 'ifidx' */
4275 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
4277 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4278 struct sk_buff *skb;
4281 void *data, *pnext = NULL;
4284 wl_event_msg_t event;
4287 void *skbhead = NULL;
4288 void *skbprev = NULL;
4289 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
4293 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
4295 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4297 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
4298 struct ether_header *eh;
4300 pnext = PKTNEXT(dhdp->osh, pktbuf);
4301 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4303 ifp = dhd->iflist[ifidx];
4305 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
4307 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4311 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4313 /* Dropping only data packets before registering net device to avoid kernel panic */
4314 #ifndef PROP_TXSTATUS_VSDB
4315 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4316 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4318 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4319 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4320 #endif /* PROP_TXSTATUS_VSDB */
4322 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
4324 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4329 #ifdef PROP_TXSTATUS
4330 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
4331 /* WLFC may send header only packet when
4332 there is an urgent message but no packet to
4335 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4339 #ifdef DHD_L2_FILTER
4340 /* If block_ping is enabled drop the ping packet */
4341 if (ifp->block_ping) {
4342 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4343 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4347 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4348 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4349 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4353 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4354 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
4356 /* Drop the packets if l2 filter has processed it already
4357 * otherwise continue with the normal path
4359 if (ret == BCME_OK) {
4360 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4364 #endif /* DHD_L2_FILTER */
4366 /* WMF processing for multicast packets */
4367 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
4371 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
4372 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
4375 /* The packet is taken by WMF. Continue to next iteration */
4378 /* Packet DROP decision by WMF. Toss it */
4379 DHD_ERROR(("%s: WMF decides to drop packet\n",
4381 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4384 /* Continue the transmit path */
4388 #endif /* DHD_WMF */
4390 #ifdef DHDTCPACK_SUPPRESS
4391 dhd_tcpdata_info_get(dhdp, pktbuf);
4393 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4396 skb->dev = ifp->net;
4399 if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
4400 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
4401 dhd_ifname(dhdp, ifidx)));
4403 #endif /* DHD_PSTA */
4405 #ifdef PCIE_FULL_DONGLE
4406 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
4407 (!ifp->ap_isolate)) {
4408 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4409 if (ETHER_ISUCAST(eh->ether_dhost)) {
4410 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
4411 dhd_sendpkt(dhdp, ifidx, pktbuf);
4415 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
4417 dhd_sendpkt(dhdp, ifidx, npktbuf);
4420 #endif /* PCIE_FULL_DONGLE */
4422 /* Get the protocol, maintain skb around eth_type_trans()
4423 * The main reason for this hack is for the limitation of
4424 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
4425 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
4426 * coping of the packet coming from the network stack to add
4427 * BDC, Hardware header etc, during network interface registration
4428 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
4429 * for BDC, Hardware header etc. and not just the ETH_HLEN
4434 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
4435 dump_data = skb->data;
4436 protocol = (dump_data[12] << 8) | dump_data[13];
4437 ifname = skb->dev ? skb->dev->name : "N/A";
4438 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
4439 #ifdef DHD_8021X_DUMP
4440 if (protocol == ETHER_TYPE_802_1X) {
4441 dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
4443 #endif /* DHD_8021X_DUMP */
4444 #ifdef DHD_DHCP_DUMP
4445 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
4449 uint16 udp_port_pos;
4450 uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
4451 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
4453 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
4454 source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
4455 dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
4456 if (source_port == 0x0044 || dest_port == 0x0044) {
4457 dump_hex = (dump_data[udp_port_pos+249] << 8) |
4458 dump_data[udp_port_pos+250];
4459 if (dump_hex == 0x0101) {
4460 DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
4461 } else if (dump_hex == 0x0102) {
4462 DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
4463 } else if (dump_hex == 0x0103) {
4464 DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
4465 } else if (dump_hex == 0x0105) {
4466 DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
4468 DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
4470 } else if (source_port == 0x0043 || dest_port == 0x0043) {
4471 DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
4474 #endif /* DHD_DHCP_DUMP */
4475 #if defined(DHD_RX_DUMP)
4476 DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
4477 if (protocol != ETHER_TYPE_BRCM) {
4478 if (dump_data[0] == 0xFF) {
4479 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
4481 if ((dump_data[12] == 8) &&
4482 (dump_data[13] == 6)) {
4483 DHD_ERROR(("%s: ARP %d\n",
4484 __FUNCTION__, dump_data[0x15]));
4486 } else if (dump_data[0] & 1) {
4487 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
4488 __FUNCTION__, MAC2STRDBG(dump_data)));
4490 #ifdef DHD_RX_FULL_DUMP
4493 for (k = 0; k < skb->len; k++) {
4494 printk("%02X ", dump_data[k]);
4500 #endif /* DHD_RX_FULL_DUMP */
4502 #endif /* DHD_RX_DUMP */
4504 skb->protocol = eth_type_trans(skb, skb->dev);
4506 if (skb->pkt_type == PACKET_MULTICAST) {
4507 dhd->pub.rx_multicast++;
4508 ifp->stats.multicast++;
4515 dhd_htsf_addrxts(dhdp, pktbuf);
4517 /* Strip header, count, deliver upward */
4518 skb_pull(skb, ETH_HLEN);
4520 /* Process special event packets and then discard them */
4521 memset(&event, 0, sizeof(event));
4522 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4523 dhd_wl_host_event(dhd, &ifidx,
4524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
4525 skb_mac_header(skb),
4528 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
4532 wl_event_to_host_order(&event);
4534 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
4536 #if defined(PNO_SUPPORT)
4537 if (event.event_type == WLC_E_PFN_NET_FOUND) {
4538 /* enforce custom wake lock to garantee that Kernel not suspended */
4539 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
4541 #endif /* PNO_SUPPORT */
4543 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
4544 #ifdef DHD_USE_STATIC_CTRLBUF
4545 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4547 PKTFREE(dhdp->osh, pktbuf, FALSE);
4548 #endif /* DHD_USE_STATIC_CTRLBUF */
4550 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
4552 tout_rx = DHD_PACKET_TIMEOUT_MS;
4554 #ifdef PROP_TXSTATUS
4555 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
4556 #endif /* PROP_TXSTATUS */
4559 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
4560 ifp = dhd->iflist[ifidx];
4563 ifp->net->last_rx = jiffies;
4565 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
4566 dhdp->dstats.rx_bytes += skb->len;
4567 dhdp->rx_packets++; /* Local count */
4568 ifp->stats.rx_bytes += skb->len;
4569 ifp->stats.rx_packets++;
4572 if (in_interrupt()) {
4573 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4574 __FUNCTION__, __LINE__);
4575 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4576 #if defined(DHD_LB) && defined(DHD_LB_RXP)
4577 netif_receive_skb(skb);
4580 #endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
4581 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4583 if (dhd->rxthread_enabled) {
4587 PKTSETNEXT(dhdp->osh, skbprev, skb);
4591 /* If the receive is not processed inside an ISR,
4592 * the softirqd must be woken explicitly to service
4593 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4594 * by netif_rx_ni(), but in earlier kernels, we need
4595 * to do it manually.
4597 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4598 __FUNCTION__, __LINE__);
4600 #if defined(DHD_LB) && defined(DHD_LB_RXP)
4601 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4602 netif_receive_skb(skb);
4603 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4606 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4608 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4611 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4613 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4614 local_irq_save(flags);
4616 local_irq_restore(flags);
4617 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4618 #endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
4623 if (dhd->rxthread_enabled && skbhead)
4624 dhd_sched_rxf(dhdp, skbhead);
4626 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
4627 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
4628 DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
4632 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
4634 /* Linux version has nothing to do */
4639 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4641 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4642 struct ether_header *eh;
4645 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4647 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4648 type = ntoh16(eh->ether_type);
4650 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
4651 atomic_dec(&dhd->pend_8021x_cnt);
4653 #ifdef PROP_TXSTATUS
4654 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4655 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4656 uint datalen = PKTLEN(dhd->pub.osh, txp);
4659 dhd->pub.tx_packets++;
4660 ifp->stats.tx_packets++;
4661 ifp->stats.tx_bytes += datalen;
4663 ifp->stats.tx_dropped++;
4670 static struct net_device_stats *
4671 dhd_get_stats(struct net_device *net)
4673 dhd_info_t *dhd = DHD_DEV_INFO(net);
4677 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4679 ifidx = dhd_net2idx(dhd, net);
4680 if (ifidx == DHD_BAD_IF) {
4681 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4683 memset(&net->stats, 0, sizeof(net->stats));
4687 ifp = dhd->iflist[ifidx];
4691 /* Use the protocol to get dongle stats */
4692 dhd_prot_dstats(&dhd->pub);
4698 dhd_watchdog_thread(void *data)
4700 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4701 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4702 /* This thread doesn't need any user-level access,
4703 * so get rid of all our resources
4705 if (dhd_watchdog_prio > 0) {
4706 struct sched_param param;
4707 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
4708 dhd_watchdog_prio:(MAX_RT_PRIO-1);
4709 setScheduler(current, SCHED_FIFO, ¶m);
4713 if (down_interruptible (&tsk->sema) == 0) {
4714 unsigned long flags;
4715 unsigned long jiffies_at_start = jiffies;
4716 unsigned long time_lapse;
4718 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
4719 SMP_RD_BARRIER_DEPENDS();
4720 if (tsk->terminated) {
4724 if (dhd->pub.dongle_reset == FALSE) {
4725 DHD_TIMER(("%s:\n", __FUNCTION__));
4726 dhd_bus_watchdog(&dhd->pub);
4728 DHD_GENERAL_LOCK(&dhd->pub, flags);
4729 /* Count the tick for reference */
4731 #ifdef DHD_L2_FILTER
4732 dhd_l2_filter_watchdog(&dhd->pub);
4733 #endif /* DHD_L2_FILTER */
4734 time_lapse = jiffies - jiffies_at_start;
4736 /* Reschedule the watchdog */
4737 if (dhd->wd_timer_valid) {
4738 mod_timer(&dhd->timer,
4740 msecs_to_jiffies(dhd_watchdog_ms) -
4741 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
4743 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4745 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4751 complete_and_exit(&tsk->completed, 0);
4754 static void dhd_watchdog(ulong data)
4756 dhd_info_t *dhd = (dhd_info_t *)data;
4757 unsigned long flags;
4759 if (dhd->pub.dongle_reset) {
4763 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
4764 DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
4768 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
4769 up(&dhd->thr_wdt_ctl.sema);
4773 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
4774 /* Call the bus module watchdog */
4775 dhd_bus_watchdog(&dhd->pub);
4776 DHD_GENERAL_LOCK(&dhd->pub, flags);
4777 /* Count the tick for reference */
4780 #ifdef DHD_L2_FILTER
4781 dhd_l2_filter_watchdog(&dhd->pub);
4782 #endif /* DHD_L2_FILTER */
4783 /* Reschedule the watchdog */
4784 if (dhd->wd_timer_valid)
4785 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
4786 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4787 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4790 #ifdef DHD_PCIE_RUNTIMEPM
4792 dhd_rpm_state_thread(void *data)
4794 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4795 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4798 if (down_interruptible (&tsk->sema) == 0) {
4799 unsigned long flags;
4800 unsigned long jiffies_at_start = jiffies;
4801 unsigned long time_lapse;
4803 SMP_RD_BARRIER_DEPENDS();
4804 if (tsk->terminated) {
4808 if (dhd->pub.dongle_reset == FALSE) {
4809 DHD_TIMER(("%s:\n", __FUNCTION__));
4811 dhd_runtimepm_state(&dhd->pub);
4814 DHD_GENERAL_LOCK(&dhd->pub, flags);
4815 time_lapse = jiffies - jiffies_at_start;
4817 /* Reschedule the watchdog */
4818 if (dhd->rpm_timer_valid) {
4819 mod_timer(&dhd->rpm_timer,
4821 msecs_to_jiffies(dhd_runtimepm_ms) -
4822 min(msecs_to_jiffies(dhd_runtimepm_ms),
4825 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4832 complete_and_exit(&tsk->completed, 0);
4835 static void dhd_runtimepm(ulong data)
4837 dhd_info_t *dhd = (dhd_info_t *)data;
4839 if (dhd->pub.dongle_reset) {
4843 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4844 up(&dhd->thr_rpm_ctl.sema);
4849 void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
4851 dhd_os_runtimepm_timer(dhdp, 0);
4852 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
4853 DHD_ERROR(("DHD Runtime PM Disabled \n"));
4856 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
4858 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
4859 DHD_ERROR(("DHD Runtime PM Enabled \n"));
4862 #endif /* DHD_PCIE_RUNTIMEPM */
4865 #ifdef ENABLE_ADAPTIVE_SCHED
4867 dhd_sched_policy(int prio)
4869 struct sched_param param;
4870 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
4871 param.sched_priority = 0;
4872 setScheduler(current, SCHED_NORMAL, ¶m);
4874 if (get_scheduler_policy(current) != SCHED_FIFO) {
4875 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
4876 setScheduler(current, SCHED_FIFO, ¶m);
4880 #endif /* ENABLE_ADAPTIVE_SCHED */
4881 #ifdef DEBUG_CPU_FREQ
4882 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
4884 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
4885 struct cpufreq_freqs *freq = data;
4889 if (val == CPUFREQ_POSTCHANGE) {
4890 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
4891 freq->new, freq->cpu));
4892 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
4898 #endif /* DEBUG_CPU_FREQ */
4900 dhd_dpc_thread(void *data)
4902 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4903 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4905 /* This thread doesn't need any user-level access,
4906 * so get rid of all our resources
4908 if (dhd_dpc_prio > 0)
4910 struct sched_param param;
4911 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
4912 setScheduler(current, SCHED_FIFO, ¶m);
4915 #ifdef CUSTOM_DPC_CPUCORE
4916 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
4918 if (dhd->pub.conf->dpc_cpucore >= 0) {
4919 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
4920 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
4923 #ifdef CUSTOM_SET_CPUCORE
4924 dhd->pub.current_dpc = current;
4925 #endif /* CUSTOM_SET_CPUCORE */
4926 /* Run until signal received */
4928 if (!binary_sema_down(tsk)) {
4929 #ifdef ENABLE_ADAPTIVE_SCHED
4930 dhd_sched_policy(dhd_dpc_prio);
4931 #endif /* ENABLE_ADAPTIVE_SCHED */
4932 SMP_RD_BARRIER_DEPENDS();
4933 if (tsk->terminated) {
4937 /* Call bus dpc unless it indicated down (then clean stop) */
4938 if (dhd->pub.busstate != DHD_BUS_DOWN) {
4939 #ifdef DEBUG_DPC_THREAD_WATCHDOG
4940 int resched_cnt = 0;
4941 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
4942 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
4943 while (dhd_bus_dpc(dhd->pub.bus)) {
4944 /* process all data */
4945 #ifdef DEBUG_DPC_THREAD_WATCHDOG
4947 if (resched_cnt > MAX_RESCHED_CNT) {
4948 DHD_INFO(("%s Calling msleep to"
4949 "let other processes run. \n",
4951 dhd->pub.dhd_bug_on = true;
4955 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
4957 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
4958 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4961 dhd_bus_stop(dhd->pub.bus, TRUE);
4962 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4968 complete_and_exit(&tsk->completed, 0);
4972 dhd_rxf_thread(void *data)
4974 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4975 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4976 #if defined(WAIT_DEQUEUE)
4977 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
4978 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
4980 dhd_pub_t *pub = &dhd->pub;
4982 /* This thread doesn't need any user-level access,
4983 * so get rid of all our resources
4985 if (dhd_rxf_prio > 0)
4987 struct sched_param param;
4988 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
4989 setScheduler(current, SCHED_FIFO, ¶m);
4992 DAEMONIZE("dhd_rxf");
4993 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
4995 /* signal: thread has started */
4996 complete(&tsk->completed);
4997 #ifdef CUSTOM_SET_CPUCORE
4998 dhd->pub.current_rxf = current;
4999 #endif /* CUSTOM_SET_CPUCORE */
5000 /* Run until signal received */
5002 if (down_interruptible(&tsk->sema) == 0) {
5004 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
5007 #ifdef ENABLE_ADAPTIVE_SCHED
5008 dhd_sched_policy(dhd_rxf_prio);
5009 #endif /* ENABLE_ADAPTIVE_SCHED */
5011 SMP_RD_BARRIER_DEPENDS();
5013 if (tsk->terminated) {
5016 skb = dhd_rxf_dequeue(pub);
5022 void *skbnext = PKTNEXT(pub->osh, skb);
5023 PKTSETNEXT(pub->osh, skb, NULL);
5024 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5025 __FUNCTION__, __LINE__);
5026 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5030 local_irq_save(flags);
5032 local_irq_restore(flags);
5037 #if defined(WAIT_DEQUEUE)
5038 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5040 watchdogTime = OSL_SYSUPTIME();
5044 DHD_OS_WAKE_UNLOCK(pub);
5049 complete_and_exit(&tsk->completed, 0);
5053 void dhd_dpc_enable(dhd_pub_t *dhdp)
5057 if (!dhdp || !dhdp->info)
5063 __skb_queue_head_init(&dhd->rx_pend_queue);
5064 #endif /* DHD_LB_RXP */
5066 if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
5067 tasklet_enable(&dhd->tx_compl_tasklet);
5068 #endif /* DHD_LB_TXC */
5070 if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
5071 tasklet_enable(&dhd->rx_compl_tasklet);
5072 #endif /* DHD_LB_RXC */
5074 if (atomic_read(&dhd->tasklet.count) == 1)
5075 tasklet_enable(&dhd->tasklet);
5077 #endif /* BCMPCIE */
5082 dhd_dpc_kill(dhd_pub_t *dhdp)
5096 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5097 tasklet_disable(&dhd->tasklet);
5098 tasklet_kill(&dhd->tasklet);
5099 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5103 __skb_queue_purge(&dhd->rx_pend_queue);
5104 #endif /* DHD_LB_RXP */
5105 /* Kill the Load Balancing Tasklets */
5106 #if defined(DHD_LB_TXC)
5107 tasklet_disable(&dhd->tx_compl_tasklet);
5108 tasklet_kill(&dhd->tx_compl_tasklet);
5109 #endif /* DHD_LB_TXC */
5110 #if defined(DHD_LB_RXC)
5111 tasklet_disable(&dhd->rx_compl_tasklet);
5112 tasklet_kill(&dhd->rx_compl_tasklet);
5113 #endif /* DHD_LB_RXC */
5116 #endif /* BCMPCIE */
5123 dhd = (dhd_info_t *)data;
5125 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5126 * down below , wake lock is set,
5127 * the tasklet is initialized in dhd_attach()
5129 /* Call bus dpc unless it indicated down (then clean stop) */
5130 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5131 if (dhd_bus_dpc(dhd->pub.bus)) {
5132 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5133 tasklet_schedule(&dhd->tasklet);
5136 dhd_bus_stop(dhd->pub.bus, TRUE);
5141 dhd_sched_dpc(dhd_pub_t *dhdp)
5143 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5145 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5146 DHD_OS_WAKE_LOCK(dhdp);
5147 /* If the semaphore does not get up,
5148 * wake unlock should be done here
5150 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5151 DHD_OS_WAKE_UNLOCK(dhdp);
5155 tasklet_schedule(&dhd->tasklet);
5160 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5162 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5163 #ifdef RXF_DEQUEUE_ON_BUSY
5166 #endif /* RXF_DEQUEUE_ON_BUSY */
5168 DHD_OS_WAKE_LOCK(dhdp);
5170 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5171 #ifdef RXF_DEQUEUE_ON_BUSY
5173 ret = dhd_rxf_enqueue(dhdp, skb);
5174 if (ret == BCME_OK || ret == BCME_ERROR)
5177 OSL_SLEEP(50); /* waiting for dequeueing */
5178 } while (retry-- > 0);
5180 if (retry <= 0 && ret == BCME_BUSY) {
5184 void *skbnext = PKTNEXT(dhdp->osh, skbp);
5185 PKTSETNEXT(dhdp->osh, skbp, NULL);
5186 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5187 __FUNCTION__, __LINE__);
5191 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5193 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5194 up(&dhd->thr_rxf_ctl.sema);
5197 #else /* RXF_DEQUEUE_ON_BUSY */
5199 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5202 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5203 up(&dhd->thr_rxf_ctl.sema);
5206 #endif /* RXF_DEQUEUE_ON_BUSY */
5209 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5210 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5213 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5215 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5221 memset(&ioc, 0, sizeof(ioc));
5223 ioc.cmd = WLC_GET_VAR;
5225 ioc.len = (uint)sizeof(buf);
5228 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5229 buf[sizeof(buf) - 1] = '\0';
5230 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5231 /* Check for older dongle image that doesn't support toe_ol */
5233 DHD_ERROR(("%s: toe not supported by device\n",
5234 dhd_ifname(&dhd->pub, ifidx)));
5238 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5242 memcpy(toe_ol, buf, sizeof(uint32));
5246 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5248 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5254 memset(&ioc, 0, sizeof(ioc));
5256 ioc.cmd = WLC_SET_VAR;
5258 ioc.len = (uint)sizeof(buf);
5261 /* Set toe_ol as requested */
5263 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5264 buf[sizeof(buf) - 1] = '\0';
5265 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
5267 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5268 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5269 dhd_ifname(&dhd->pub, ifidx), ret));
5273 /* Enable toe globally only if any components are enabled. */
5275 toe = (toe_ol != 0);
5278 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
5280 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5281 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5289 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5290 void dhd_set_scb_probe(dhd_pub_t *dhd)
5293 wl_scb_probe_t scb_probe;
5294 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5296 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
5298 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5302 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
5304 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
5305 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
5308 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5310 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5312 bcm_mkiovar("scb_probe", (char *)&scb_probe,
5313 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
5314 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5315 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5319 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5321 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
5323 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
5325 dhd_info_t *dhd = DHD_DEV_INFO(net);
5327 snprintf(info->driver, sizeof(info->driver), "wl");
5328 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5331 struct ethtool_ops dhd_ethtool_ops = {
5332 .get_drvinfo = dhd_ethtool_get_drvinfo
5334 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5337 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
5339 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
5341 struct ethtool_drvinfo info;
5342 char drvname[sizeof(info.driver)];
5345 struct ethtool_value edata;
5346 uint32 toe_cmpnt, csum_dir;
5350 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5352 /* all ethtool calls start with a cmd word */
5353 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
5357 case ETHTOOL_GDRVINFO:
5358 /* Copy out any request driver name */
5359 if (copy_from_user(&info, uaddr, sizeof(info)))
5361 strncpy(drvname, info.driver, sizeof(info.driver));
5362 drvname[sizeof(info.driver)-1] = '\0';
5364 /* clear struct for return */
5365 memset(&info, 0, sizeof(info));
5368 /* if dhd requested, identify ourselves */
5369 if (strcmp(drvname, "?dhd") == 0) {
5370 snprintf(info.driver, sizeof(info.driver), "dhd");
5371 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
5372 info.version[sizeof(info.version) - 1] = '\0';
5375 /* otherwise, require dongle to be up */
5376 else if (!dhd->pub.up) {
5377 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
5381 /* finally, report dongle driver type */
5382 else if (dhd->pub.iswl)
5383 snprintf(info.driver, sizeof(info.driver), "wl");
5385 snprintf(info.driver, sizeof(info.driver), "xx");
5387 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
5388 if (copy_to_user(uaddr, &info, sizeof(info)))
5390 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
5391 (int)sizeof(drvname), drvname, info.driver));
5395 /* Get toe offload components from dongle */
5396 case ETHTOOL_GRXCSUM:
5397 case ETHTOOL_GTXCSUM:
5398 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5401 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5404 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
5406 if (copy_to_user(uaddr, &edata, sizeof(edata)))
5410 /* Set toe offload components in dongle */
5411 case ETHTOOL_SRXCSUM:
5412 case ETHTOOL_STXCSUM:
5413 if (copy_from_user(&edata, uaddr, sizeof(edata)))
5416 /* Read the current settings, update and write back */
5417 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5420 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5422 if (edata.data != 0)
5423 toe_cmpnt |= csum_dir;
5425 toe_cmpnt &= ~csum_dir;
5427 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
5430 /* If setting TX checksum mode, tell Linux the new mode */
5431 if (cmd == ETHTOOL_STXCSUM) {
5433 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
5435 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
5447 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5449 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
5454 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
5461 dhd = (dhd_info_t *)dhdp->info;
5462 #if !defined(BCMPCIE)
5463 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5464 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
5469 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
5470 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
5472 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
5473 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
5474 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
5476 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
5477 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
5478 #endif /* BCMPCIE */
5479 if (dhdp->hang_reason == 0) {
5480 if (dhdp->dongle_trap_occured) {
5481 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
5483 } else if (dhdp->d3ackcnt_timeout) {
5484 dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
5485 #endif /* BCMPCIE */
5487 dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
5490 net_os_send_hang_message(net);
5496 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
5498 int bcmerror = BCME_OK;
5500 struct net_device *net;
5502 net = dhd_idx2net(pub, ifidx);
5504 bcmerror = BCME_BADARG;
5509 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
5511 /* check for local dhd ioctl and handle it */
5512 if (ioc->driver == DHD_IOCTL_MAGIC) {
5513 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
5515 pub->bcmerror = bcmerror;
5519 /* send to dongle (must be up, and wl). */
5520 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
5521 if (allow_delay_fwdl) {
5522 int ret = dhd_bus_start(pub);
5524 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
5525 bcmerror = BCME_DONGLE_DOWN;
5529 bcmerror = BCME_DONGLE_DOWN;
5535 bcmerror = BCME_DONGLE_DOWN;
5540 * Flush the TX queue if required for proper message serialization:
5541 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
5542 * prevent M4 encryption and
5543 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
5544 * prevent disassoc frame being sent before WPS-DONE frame.
5546 if (ioc->cmd == WLC_SET_KEY ||
5547 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
5548 strncmp("wsec_key", data_buf, 9) == 0) ||
5549 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
5550 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
5551 ioc->cmd == WLC_DISASSOC)
5552 dhd_wait_pend8021x(net);
5556 /* short cut wl ioctl calls here */
5557 if (strcmp("htsf", data_buf) == 0) {
5558 dhd_ioctl_htsf_get(dhd, 0);
5562 if (strcmp("htsflate", data_buf) == 0) {
5564 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
5565 memset(&maxdelayts, 0, sizeof(tstamp_t));
5569 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
5570 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
5571 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
5572 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
5578 if (strcmp("htsfclear", data_buf) == 0) {
5579 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
5580 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
5581 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
5582 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
5586 if (strcmp("htsfhis", data_buf) == 0) {
5587 dhd_dump_htsfhisto(&vi_d1, "H to D");
5588 dhd_dump_htsfhisto(&vi_d2, "D to D");
5589 dhd_dump_htsfhisto(&vi_d3, "D to H");
5590 dhd_dump_htsfhisto(&vi_d4, "H to H");
5593 if (strcmp("tsport", data_buf) == 0) {
5595 memcpy(&tsport, data_buf + 7, 4);
5597 DHD_ERROR(("current timestamp port: %d \n", tsport));
5602 #endif /* WLMEDIA_HTSF */
5604 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
5605 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
5607 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
5609 bcmerror = BCME_UNSUPPORTED;
5615 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
5616 if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
5617 /* Print IOVAR Information */
5618 DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
5619 __FUNCTION__, (char *)data_buf, ioc->set));
5620 if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
5621 prhex(NULL, data_buf + strlen(data_buf) + 1,
5622 buflen - strlen(data_buf) - 1);
5625 /* Print IOCTL Information */
5626 DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
5627 __FUNCTION__, ioc->cmd, ioc->set));
5628 if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
5629 prhex(NULL, data_buf, buflen);
5633 #endif /* DHD_DEBUG */
5635 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
5638 dhd_check_hang(net, pub, bcmerror);
5644 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
5646 dhd_info_t *dhd = DHD_DEV_INFO(net);
5650 void *local_buf = NULL;
5653 DHD_OS_WAKE_LOCK(&dhd->pub);
5654 DHD_PERIM_LOCK(&dhd->pub);
5656 /* Interface up check for built-in type */
5657 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
5658 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
5663 /* send to dongle only if we are not waiting for reload already */
5664 if (dhd->pub.hang_was_sent) {
5665 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
5666 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
5667 ret = BCME_DONGLE_DOWN;
5671 ifidx = dhd_net2idx(dhd, net);
5672 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
5674 if (ifidx == DHD_BAD_IF) {
5675 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
5680 #if defined(WL_WIRELESS_EXT)
5681 /* linux wireless extensions */
5682 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
5683 /* may recurse, do NOT lock */
5684 ret = wl_iw_ioctl(net, ifr, cmd);
5687 #endif /* defined(WL_WIRELESS_EXT) */
5689 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
5690 if (cmd == SIOCETHTOOL) {
5691 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
5694 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5696 if (cmd == SIOCDEVPRIVATE+1) {
5697 ret = wl_android_priv_cmd(net, ifr, cmd);
5698 dhd_check_hang(net, &dhd->pub, ret);
5702 if (cmd != SIOCDEVPRIVATE) {
5707 memset(&ioc, 0, sizeof(ioc));
5709 #ifdef CONFIG_COMPAT
5710 if (is_compat_task()) {
5711 compat_wl_ioctl_t compat_ioc;
5712 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
5716 ioc.cmd = compat_ioc.cmd;
5717 ioc.buf = compat_ptr(compat_ioc.buf);
5718 ioc.len = compat_ioc.len;
5719 ioc.set = compat_ioc.set;
5720 ioc.used = compat_ioc.used;
5721 ioc.needed = compat_ioc.needed;
5722 /* To differentiate between wl and dhd read 4 more byes */
5723 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
5724 sizeof(uint)) != 0)) {
5729 #endif /* CONFIG_COMPAT */
5731 /* Copy the ioc control structure part of ioctl request */
5732 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
5737 /* To differentiate between wl and dhd read 4 more byes */
5738 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
5739 sizeof(uint)) != 0)) {
5745 if (!capable(CAP_NET_ADMIN)) {
5751 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
5752 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
5757 DHD_PERIM_UNLOCK(&dhd->pub);
5758 if (copy_from_user(local_buf, ioc.buf, buflen)) {
5759 DHD_PERIM_LOCK(&dhd->pub);
5763 DHD_PERIM_LOCK(&dhd->pub);
5765 *(char *)(local_buf + buflen) = '\0';
5768 ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
5770 if (!ret && buflen && local_buf && ioc.buf) {
5771 DHD_PERIM_UNLOCK(&dhd->pub);
5772 if (copy_to_user(ioc.buf, local_buf, buflen))
5774 DHD_PERIM_LOCK(&dhd->pub);
5779 MFREE(dhd->pub.osh, local_buf, buflen+1);
5782 DHD_PERIM_UNLOCK(&dhd->pub);
5783 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5785 return OSL_ERROR(ret);
5789 #ifdef FIX_CPU_MIN_CLOCK
5790 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5793 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5794 mutex_init(&dhd->cpufreq_fix);
5796 dhd->cpufreq_fix_status = FALSE;
5801 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5804 mutex_lock(&dhd->cpufreq_fix);
5806 if (dhd && !dhd->cpufreq_fix_status) {
5807 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5808 #ifdef FIX_BUS_MIN_CLOCK
5809 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5810 #endif /* FIX_BUS_MIN_CLOCK */
5811 DHD_ERROR(("pm_qos_add_requests called\n"));
5813 dhd->cpufreq_fix_status = TRUE;
5815 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5816 mutex_unlock(&dhd->cpufreq_fix);
5820 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5822 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5823 mutex_lock(&dhd ->cpufreq_fix);
5825 if (dhd && dhd->cpufreq_fix_status != TRUE) {
5826 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5827 mutex_unlock(&dhd->cpufreq_fix);
5832 pm_qos_remove_request(&dhd->dhd_cpu_qos);
5833 #ifdef FIX_BUS_MIN_CLOCK
5834 pm_qos_remove_request(&dhd->dhd_bus_qos);
5835 #endif /* FIX_BUS_MIN_CLOCK */
5836 DHD_ERROR(("pm_qos_add_requests called\n"));
5838 dhd->cpufreq_fix_status = FALSE;
5839 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5840 mutex_unlock(&dhd->cpufreq_fix);
5843 #endif /* FIX_CPU_MIN_CLOCK */
5845 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
5846 int dhd_deepsleep(dhd_info_t *dhd, int flag)
5857 case 1 : /* Deepsleep on */
5858 DHD_ERROR(("dhd_deepsleep: ON\n"));
5859 /* give some time to sysioc_work before deepsleep */
5861 #ifdef PKT_FILTER_SUPPORT
5862 /* disable pkt filter */
5863 dhd_enable_packet_filter(0, dhdp);
5864 #endif /* PKT_FILTER_SUPPORT */
5867 memset(iovbuf, 0, sizeof(iovbuf));
5868 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5869 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5871 /* Enable Deepsleep */
5873 memset(iovbuf, 0, sizeof(iovbuf));
5874 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5875 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5878 case 0: /* Deepsleep Off */
5879 DHD_ERROR(("dhd_deepsleep: OFF\n"));
5881 /* Disable Deepsleep */
5882 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
5884 memset(iovbuf, 0, sizeof(iovbuf));
5885 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5886 iovbuf, sizeof(iovbuf));
5887 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
5888 sizeof(iovbuf), TRUE, 0);
5890 memset(iovbuf, 0, sizeof(iovbuf));
5891 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5892 iovbuf, sizeof(iovbuf));
5893 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
5894 sizeof(iovbuf), FALSE, 0)) < 0) {
5895 DHD_ERROR(("the error of dhd deepsleep status"
5896 " ret value :%d\n", ret));
5898 if (!(*(int *)iovbuf)) {
5899 DHD_ERROR(("deepsleep mode is 0,"
5900 " count: %d\n", cnt));
5908 memset(iovbuf, 0, sizeof(iovbuf));
5909 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5910 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5918 dhd_stop(struct net_device *net)
5921 dhd_info_t *dhd = DHD_DEV_INFO(net);
5922 DHD_OS_WAKE_LOCK(&dhd->pub);
5923 DHD_PERIM_LOCK(&dhd->pub);
5924 printf("%s: Enter %p\n", __FUNCTION__, net);
5925 dhd->pub.rxcnt_timeout = 0;
5926 dhd->pub.txcnt_timeout = 0;
5929 dhd->pub.d3ackcnt_timeout = 0;
5930 #endif /* BCMPCIE */
5932 if (dhd->pub.up == 0) {
5936 dhd_if_flush_sta(DHD_DEV_IFP(net));
5938 /* Disable Runtime PM before interface down */
5939 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
5941 #ifdef FIX_CPU_MIN_CLOCK
5942 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
5943 dhd_rollback_cpu_freq(dhd);
5944 #endif /* FIX_CPU_MIN_CLOCK */
5946 ifidx = dhd_net2idx(dhd, net);
5947 BCM_REFERENCE(ifidx);
5949 /* Set state and stop OS transmissions */
5950 netif_stop_queue(net);
5956 wl_cfg80211_down(NULL);
5958 ifp = dhd->iflist[0];
5959 ASSERT(ifp && ifp->net);
5961 * For CFG80211: Clean up all the left over virtual interfaces
5962 * when the primary Interface is brought down. [ifconfig wlan0 down]
5964 if (!dhd_download_fw_on_driverload) {
5965 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
5966 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
5969 #ifdef WL_CFG80211_P2P_DEV_IF
5970 wl_cfg80211_del_p2p_wdev();
5971 #endif /* WL_CFG80211_P2P_DEV_IF */
5973 dhd_net_if_lock_local(dhd);
5974 for (i = 1; i < DHD_MAX_IFS; i++)
5975 dhd_remove_if(&dhd->pub, i, FALSE);
5977 if (ifp && ifp->net) {
5978 dhd_if_del_sta_list(ifp);
5981 #ifdef ARP_OFFLOAD_SUPPORT
5982 if (dhd_inetaddr_notifier_registered) {
5983 dhd_inetaddr_notifier_registered = FALSE;
5984 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
5986 #endif /* ARP_OFFLOAD_SUPPORT */
5987 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
5988 if (dhd_inet6addr_notifier_registered) {
5989 dhd_inet6addr_notifier_registered = FALSE;
5990 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
5992 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
5993 dhd_net_if_unlock_local(dhd);
5995 cancel_work_sync(dhd->dhd_deferred_wq);
5996 #if defined(DHD_LB) && defined(DHD_LB_RXP)
5997 __skb_queue_purge(&dhd->rx_pend_queue);
5998 #endif /* DHD_LB && DHD_LB_RXP */
6001 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6002 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6003 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6004 #if defined(DHD_LB) && defined(DHD_LB_RXP)
6005 if (ifp->net == dhd->rx_napi_netdev) {
6006 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
6007 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6008 skb_queue_purge(&dhd->rx_napi_queue);
6009 napi_disable(&dhd->rx_napi_struct);
6010 netif_napi_del(&dhd->rx_napi_struct);
6011 dhd->rx_napi_netdev = NULL;
6013 #endif /* DHD_LB && DHD_LB_RXP */
6016 #endif /* WL_CFG80211 */
6018 #ifdef PROP_TXSTATUS
6019 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
6021 /* Stop the protocol module */
6022 dhd_prot_stop(&dhd->pub);
6024 OLD_MOD_DEC_USE_COUNT;
6026 if (ifidx == 0 && !dhd_download_fw_on_driverload)
6027 wl_android_wifi_off(net, TRUE);
6029 if (dhd->pub.conf->deepsleep)
6030 dhd_deepsleep(dhd, 1);
6032 dhd->pub.hang_was_sent = 0;
6034 /* Clear country spec for for built-in type driver */
6035 if (!dhd_download_fw_on_driverload) {
6036 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
6037 dhd->pub.dhd_cspec.rev = 0;
6038 dhd->pub.dhd_cspec.ccode[0] = 0x00;
6045 DHD_PERIM_UNLOCK(&dhd->pub);
6046 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6048 /* Destroy wakelock */
6049 if (!dhd_download_fw_on_driverload &&
6050 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6051 DHD_OS_WAKE_LOCK_DESTROY(dhd);
6052 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
6054 printf("%s: Exit\n", __FUNCTION__);
6059 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6060 extern bool g_first_broadcast_scan;
6064 static int dhd_interworking_enable(dhd_pub_t *dhd)
6066 char iovbuf[WLC_IOCTL_SMLEN];
6067 uint32 enable = true;
6070 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
6071 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6073 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
6076 if (ret == BCME_OK) {
6077 /* basic capabilities for HS20 REL2 */
6078 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
6079 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
6080 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6082 DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
6091 dhd_open(struct net_device *net)
6093 dhd_info_t *dhd = DHD_DEV_INFO(net);
6098 char iovbuf[WLC_IOCTL_SMLEN];
6099 dbus_config_t config;
6100 uint32 agglimit = 0;
6101 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
6102 #endif /* BCM_FD_AGGR */
6106 if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
6107 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
6111 printf("%s: Enter %p\n", __FUNCTION__, net);
6112 #if defined(MULTIPLE_SUPPLICANT)
6113 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6114 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6115 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
6117 mutex_lock(&_dhd_sdio_mutex_lock_);
6119 #endif /* MULTIPLE_SUPPLICANT */
6121 if (!dhd_download_fw_on_driverload &&
6122 !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6123 DHD_OS_WAKE_LOCK_INIT(dhd);
6124 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
6127 #ifdef PREVENT_REOPEN_DURING_HANG
6128 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
6129 if (dhd->pub.hang_was_sent == 1) {
6130 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
6131 /* Force to bring down WLAN interface in case dhd_stop() is not called
6132 * from the upper layer when HANG event is triggered.
6134 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
6135 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
6141 #endif /* PREVENT_REOPEN_DURING_HANG */
6144 DHD_OS_WAKE_LOCK(&dhd->pub);
6145 DHD_PERIM_LOCK(&dhd->pub);
6146 dhd->pub.dongle_trap_occured = 0;
6147 dhd->pub.hang_was_sent = 0;
6148 dhd->pub.hang_reason = 0;
6149 #ifdef DHD_LOSSLESS_ROAMING
6150 dhd->pub.dequeue_prec_map = ALLPRIO;
6154 * Force start if ifconfig_up gets called before START command
6155 * We keep WEXT's wl_control_wl_start to provide backward compatibility
6156 * This should be removed in the future
6158 ret = wl_control_wl_start(net);
6160 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6166 ifidx = dhd_net2idx(dhd, net);
6167 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6170 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
6175 if (!dhd->iflist[ifidx]) {
6176 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
6182 atomic_set(&dhd->pend_8021x_cnt, 0);
6183 if (!dhd_download_fw_on_driverload) {
6184 DHD_ERROR(("\n%s\n", dhd_version));
6185 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
6186 g_first_broadcast_scan = TRUE;
6188 ret = wl_android_wifi_on(net);
6190 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
6191 __FUNCTION__, ret));
6196 #ifdef FIX_CPU_MIN_CLOCK
6197 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
6198 dhd_init_cpufreq_fix(dhd);
6199 dhd_fix_cpu_freq(dhd);
6201 #endif /* FIX_CPU_MIN_CLOCK */
6203 if (dhd->pub.busstate != DHD_BUS_DATA) {
6205 /* try to bring up bus */
6206 DHD_PERIM_UNLOCK(&dhd->pub);
6207 ret = dhd_bus_start(&dhd->pub);
6208 DHD_PERIM_LOCK(&dhd->pub);
6210 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6216 if (dhd_download_fw_on_driverload) {
6217 if (dhd->pub.conf->deepsleep)
6218 dhd_deepsleep(dhd, 0);
6222 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
6225 memset(iovbuf, 0, sizeof(iovbuf));
6226 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
6227 iovbuf, sizeof(iovbuf));
6229 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
6230 agglimit = *(uint32 *)iovbuf;
6231 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
6232 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
6233 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
6234 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
6235 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
6236 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
6239 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
6240 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
6243 /* Set aggregation for TX */
6244 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
6245 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
6247 /* Set aggregation for RX */
6248 memset(iovbuf, 0, sizeof(iovbuf));
6249 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
6250 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
6251 dhd->pub.info->fdaggr = 0;
6252 if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
6253 dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
6254 if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
6255 dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
6257 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
6259 #endif /* BCM_FD_AGGR */
6261 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
6262 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
6265 /* Get current TOE mode from dongle */
6266 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
6267 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
6269 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
6273 #if defined(WL_CFG80211)
6274 if (unlikely(wl_cfg80211_up(NULL))) {
6275 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
6279 if (!dhd_download_fw_on_driverload) {
6280 #ifdef ARP_OFFLOAD_SUPPORT
6281 dhd->pend_ipaddr = 0;
6282 if (!dhd_inetaddr_notifier_registered) {
6283 dhd_inetaddr_notifier_registered = TRUE;
6284 register_inetaddr_notifier(&dhd_inetaddr_notifier);
6286 #endif /* ARP_OFFLOAD_SUPPORT */
6287 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6288 if (!dhd_inet6addr_notifier_registered) {
6289 dhd_inet6addr_notifier_registered = TRUE;
6290 register_inet6addr_notifier(&dhd_inet6addr_notifier);
6292 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6294 DHD_LB_STATS_INIT(&dhd->pub);
6296 __skb_queue_head_init(&dhd->rx_pend_queue);
6297 #endif /* DHD_LB_RXP */
6301 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6302 #if defined(SET_RPS_CPUS)
6303 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6305 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
6307 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6308 #if defined(DHD_LB) && defined(DHD_LB_RXP)
6309 if (dhd->rx_napi_netdev == NULL) {
6310 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
6311 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
6312 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
6313 dhd_napi_poll, dhd_napi_weight);
6314 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
6315 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6316 napi_enable(&dhd->rx_napi_struct);
6317 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
6318 skb_queue_head_init(&dhd->rx_napi_queue);
6320 #endif /* DHD_LB && DHD_LB_RXP */
6321 #if defined(NUM_SCB_MAX_PROBE)
6322 dhd_set_scb_probe(&dhd->pub);
6323 #endif /* NUM_SCB_MAX_PROBE */
6324 #endif /* WL_CFG80211 */
6327 /* Allow transmit calls */
6328 netif_start_queue(net);
6331 OLD_MOD_INC_USE_COUNT;
6334 dhd_dbg_init(&dhd->pub);
6342 DHD_PERIM_UNLOCK(&dhd->pub);
6343 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6345 #if defined(MULTIPLE_SUPPLICANT)
6346 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6347 mutex_unlock(&_dhd_sdio_mutex_lock_);
6349 #endif /* MULTIPLE_SUPPLICANT */
6351 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
6355 int dhd_do_driver_init(struct net_device *net)
6357 dhd_info_t *dhd = NULL;
6360 DHD_ERROR(("Primary Interface not initialized \n"));
6364 #ifdef MULTIPLE_SUPPLICANT
6365 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6366 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6367 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6370 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6371 #endif /* MULTIPLE_SUPPLICANT */
6373 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
6374 dhd = DHD_DEV_INFO(net);
6376 /* If driver is already initialized, do nothing
6378 if (dhd->pub.busstate == DHD_BUS_DATA) {
6379 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6383 if (dhd_open(net) < 0) {
6384 DHD_ERROR(("Driver Init Failed \n"));
6392 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6396 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6400 /* handle IF event caused by wl commands, SoftAP, WEXT and
6401 * anything else. This has to be done asynchronously otherwise
6402 * DPC will be blocked (and iovars will timeout as DPC has no chance
6403 * to read the response back)
6405 if (ifevent->ifidx > 0) {
6406 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6407 if (if_event == NULL) {
6408 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6409 MALLOCED(dhdinfo->pub.osh)));
6413 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6414 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6415 strncpy(if_event->name, name, IFNAMSIZ);
6416 if_event->name[IFNAMSIZ - 1] = '\0';
6417 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6418 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6425 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6427 dhd_if_event_t *if_event;
6430 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6432 #endif /* WL_CFG80211 */
6434 /* handle IF event caused by wl commands, SoftAP, WEXT and
6437 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6438 if (if_event == NULL) {
6439 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6440 MALLOCED(dhdinfo->pub.osh)));
6443 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6444 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6445 strncpy(if_event->name, name, IFNAMSIZ);
6446 if_event->name[IFNAMSIZ - 1] = '\0';
6447 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6448 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6453 /* unregister and free the existing net_device interface (if any) in iflist and
6454 * allocate a new one. the slot is reused. this function does NOT register the
6455 * new interface to linux kernel. dhd_register_if does the job
6458 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6459 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6461 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6464 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6465 ifp = dhdinfo->iflist[ifidx];
6468 if (ifp->net != NULL) {
6469 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6471 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6473 /* in unregister_netdev case, the interface gets freed by net->destructor
6474 * (which is set to free_netdev)
6476 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6477 free_netdev(ifp->net);
6479 netif_stop_queue(ifp->net);
6481 unregister_netdev(ifp->net);
6483 unregister_netdevice(ifp->net);
6488 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6490 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6495 memset(ifp, 0, sizeof(dhd_if_t));
6496 ifp->info = dhdinfo;
6498 ifp->bssidx = bssidx;
6500 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6502 /* Allocate etherdev, including space for private structure */
6503 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6504 if (ifp->net == NULL) {
6505 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6509 /* Setup the dhd interface's netdevice private structure. */
6510 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6512 if (name && name[0]) {
6513 strncpy(ifp->net->name, name, IFNAMSIZ);
6514 ifp->net->name[IFNAMSIZ - 1] = '\0';
6519 ifp->net->destructor = free_netdev;
6521 ifp->net->destructor = dhd_netdev_free;
6523 ifp->net->destructor = free_netdev;
6524 #endif /* WL_CFG80211 */
6525 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6526 ifp->name[IFNAMSIZ - 1] = '\0';
6527 dhdinfo->iflist[ifidx] = ifp;
6529 /* initialize the dongle provided if name */
6531 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6533 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6535 #ifdef PCIE_FULL_DONGLE
6536 /* Initialize STA info list */
6537 INIT_LIST_HEAD(&ifp->sta_list);
6538 DHD_IF_STA_LIST_LOCK_INIT(ifp);
6539 #endif /* PCIE_FULL_DONGLE */
6541 #ifdef DHD_L2_FILTER
6542 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6543 ifp->parp_allnode = TRUE;
6550 if (ifp->net != NULL) {
6551 dhd_dev_priv_clear(ifp->net);
6552 free_netdev(ifp->net);
6555 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6559 dhdinfo->iflist[ifidx] = NULL;
6563 /* unregister and free the the net_device interface associated with the indexed
6564 * slot, also free the slot memory and set the slot pointer to NULL
6567 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6569 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6572 ifp = dhdinfo->iflist[ifidx];
6575 if (ifp->net != NULL) {
6576 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6578 /* in unregister_netdev case, the interface gets freed by net->destructor
6579 * (which is set to free_netdev)
6581 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6582 free_netdev(ifp->net);
6584 netif_tx_disable(ifp->net);
6588 #if defined(SET_RPS_CPUS)
6589 custom_rps_map_clear(ifp->net->_rx);
6590 #endif /* SET_RPS_CPUS */
6591 #if defined(SET_RPS_CPUS)
6592 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6593 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6594 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6597 unregister_netdev(ifp->net);
6599 unregister_netdevice(ifp->net);
6602 dhdinfo->iflist[ifidx] = NULL;
6605 dhd_wmf_cleanup(dhdpub, ifidx);
6606 #endif /* DHD_WMF */
6607 #ifdef DHD_L2_FILTER
6608 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6609 NULL, FALSE, dhdpub->tickcnt);
6610 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6611 ifp->phnd_arp_table = NULL;
6612 #endif /* DHD_L2_FILTER */
6614 dhd_if_del_sta_list(ifp);
6616 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6623 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6624 static struct net_device_ops dhd_ops_pri = {
6625 .ndo_open = dhd_open,
6626 .ndo_stop = dhd_stop,
6627 .ndo_get_stats = dhd_get_stats,
6628 .ndo_do_ioctl = dhd_ioctl_entry,
6629 .ndo_start_xmit = dhd_start_xmit,
6630 .ndo_set_mac_address = dhd_set_mac_address,
6631 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6632 .ndo_set_rx_mode = dhd_set_multicast_list,
6634 .ndo_set_multicast_list = dhd_set_multicast_list,
6638 static struct net_device_ops dhd_ops_virt = {
6639 .ndo_get_stats = dhd_get_stats,
6640 .ndo_do_ioctl = dhd_ioctl_entry,
6641 .ndo_start_xmit = dhd_start_xmit,
6642 .ndo_set_mac_address = dhd_set_mac_address,
6643 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6644 .ndo_set_rx_mode = dhd_set_multicast_list,
6646 .ndo_set_multicast_list = dhd_set_multicast_list,
6649 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6652 extern void debugger_init(void *bus_handle);
6656 #ifdef SHOW_LOGTRACE
6657 static char *logstrs_path = "/root/logstrs.bin";
6658 static char *st_str_file_path = "/root/rtecdc.bin";
6659 static char *map_file_path = "/root/rtecdc.map";
6660 static char *rom_st_str_file_path = "/root/roml.bin";
6661 static char *rom_map_file_path = "/root/roml.map";
6663 #define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */
6664 #define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
6665 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
6666 static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
6667 static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
6668 static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
6669 static char *ram_file_str = "rtecdc";
6670 static char *rom_file_str = "roml";
6671 #define RAMSTART_BIT 0x01
6672 #define RDSTART_BIT 0x02
6673 #define RDEND_BIT 0x04
6674 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
6676 module_param(logstrs_path, charp, S_IRUGO);
6677 module_param(st_str_file_path, charp, S_IRUGO);
6678 module_param(map_file_path, charp, S_IRUGO);
6679 module_param(rom_st_str_file_path, charp, S_IRUGO);
6680 module_param(rom_map_file_path, charp, S_IRUGO);
6683 dhd_init_logstrs_array(dhd_event_log_t *temp)
6685 struct file *filep = NULL;
6688 char *raw_fmts = NULL;
6689 int logstrs_size = 0;
6691 logstr_header_t *hdr = NULL;
6692 uint32 *lognums = NULL;
6693 char *logstrs = NULL;
6703 filep = filp_open(logstrs_path, O_RDONLY, 0);
6705 if (IS_ERR(filep)) {
6706 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
6709 error = vfs_stat(logstrs_path, &stat);
6711 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
6714 logstrs_size = (int) stat.size;
6716 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6717 if (raw_fmts == NULL) {
6718 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
6721 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
6722 DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
6726 /* Remember header from the logstrs.bin file */
6727 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
6728 sizeof(logstr_header_t));
6730 if (hdr->log_magic == LOGSTRS_MAGIC) {
6732 * logstrs.bin start with header.
6734 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
6735 ram_index = (hdr->ram_lognums_offset -
6736 hdr->rom_lognums_offset) / sizeof(uint32);
6737 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
6738 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
6741 * Legacy logstrs.bin format without header.
6743 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
6744 if (num_fmts == 0) {
6745 /* Legacy ROM/RAM logstrs.bin format:
6746 * - ROM 'lognums' section
6747 * - RAM 'lognums' section
6748 * - ROM 'logstrs' section.
6749 * - RAM 'logstrs' section.
6751 * 'lognums' is an array of indexes for the strings in the
6752 * 'logstrs' section. The first uint32 is 0 (index of first
6753 * string in ROM 'logstrs' section).
6755 * The 4324b5 is the only ROM that uses this legacy format. Use the
6756 * fixed number of ROM fmtnums to find the start of the RAM
6757 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
6758 * find the ROM 'logstrs' section.
6760 #define NUM_4324B5_ROM_FMTS 186
6761 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
6762 ram_index = NUM_4324B5_ROM_FMTS;
6763 lognums = (uint32 *) raw_fmts;
6764 num_fmts = ram_index;
6765 logstrs = (char *) &raw_fmts[num_fmts << 2];
6766 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
6768 logstrs = (char *) &raw_fmts[num_fmts << 2];
6771 /* Legacy RAM-only logstrs.bin format:
6772 * - RAM 'lognums' section
6773 * - RAM 'logstrs' section.
6775 * 'lognums' is an array of indexes for the strings in the
6776 * 'logstrs' section. The first uint32 is an index to the
6777 * start of 'logstrs'. Therefore, if this index is divided
6778 * by 'sizeof(uint32)' it provides the number of logstr
6782 lognums = (uint32 *) raw_fmts;
6783 logstrs = (char *) &raw_fmts[num_fmts << 2];
6786 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
6788 DHD_ERROR(("Failed to allocate fmts memory\n"));
6792 for (i = 0; i < num_fmts; i++) {
6793 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6794 * (they are 0-indexed relative to 'rom_logstrs_offset').
6796 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6797 * are 0-indexed relative to the start of the logstrs.bin file).
6799 if (i == ram_index) {
6802 fmts[i] = &logstrs[lognums[i]];
6805 temp->raw_fmts = raw_fmts;
6806 temp->num_fmts = num_fmts;
6807 filp_close(filep, NULL);
6816 filp_close(filep, NULL);
6823 dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
6826 struct file *filep = NULL;
6828 char *raw_fmts = NULL;
6829 uint32 read_size = READ_NUM_BYTES;
6839 if (fname == NULL) {
6840 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
6847 filep = filp_open(fname, O_RDONLY, 0);
6848 if (IS_ERR(filep)) {
6849 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
6853 /* Allocate 1 byte more than read_size to terminate it with NULL */
6854 raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
6855 if (raw_fmts == NULL) {
6856 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6860 /* read ram start, rodata_start and rodata_end values from map file */
6862 while (count != ALL_MAP_VAL)
6864 error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
6866 DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
6867 map_file_path, error));
6871 if (error < read_size) {
6873 * since we reset file pos back to earlier pos by
6874 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
6875 * So if ret value is less than read_size, reached EOF don't read further
6879 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6880 raw_fmts[read_size] = '\0';
6882 /* Get ramstart address */
6883 if ((cptr = strstr(raw_fmts, ramstart_str))) {
6884 cptr = cptr - BYTES_AHEAD_NUM;
6885 sscanf(cptr, "%x %c text_start", ramstart, &c);
6886 count |= RAMSTART_BIT;
6889 /* Get ram rodata start address */
6890 if ((cptr = strstr(raw_fmts, rodata_start_str))) {
6891 cptr = cptr - BYTES_AHEAD_NUM;
6892 sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6893 count |= RDSTART_BIT;
6896 /* Get ram rodata end address */
6897 if ((cptr = strstr(raw_fmts, rodata_end_str))) {
6898 cptr = cptr - BYTES_AHEAD_NUM;
6899 sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6902 memset(raw_fmts, 0, read_size);
6904 * go back to predefined NUM of bytes so that we won't miss
6905 * the string and addr even if it comes as splited in next read.
6907 filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
6910 DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6911 *ramstart, *rodata_start, *rodata_end));
6913 DHD_ERROR(("readmap over \n"));
6921 filp_close(filep, NULL);
6924 if (count == ALL_MAP_VAL) {
6927 DHD_ERROR(("readmap error 0X%x \n", count));
6932 dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
6934 struct file *filep = NULL;
6936 char *raw_fmts = NULL;
6937 uint32 logstrs_size = 0;
6940 uint32 ramstart = 0;
6941 uint32 rodata_start = 0;
6942 uint32 rodata_end = 0;
6943 uint32 logfilebase = 0;
6945 error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
6946 if (error == BCME_ERROR) {
6947 DHD_ERROR(("readmap Error!! \n"));
6948 /* don't do event log parsing in actual case */
6949 temp->raw_sstr = NULL;
6952 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6953 ramstart, rodata_start, rodata_end));
6958 filep = filp_open(str_file, O_RDONLY, 0);
6959 if (IS_ERR(filep)) {
6960 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
6964 /* Full file size is huge. Just read required part */
6965 logstrs_size = rodata_end - rodata_start;
6967 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6968 if (raw_fmts == NULL) {
6969 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6973 logfilebase = rodata_start - ramstart;
6975 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
6977 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
6981 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
6982 if (error != logstrs_size) {
6983 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
6987 if (strstr(str_file, ram_file_str) != NULL) {
6988 temp->raw_sstr = raw_fmts;
6989 temp->ramstart = ramstart;
6990 temp->rodata_start = rodata_start;
6991 temp->rodata_end = rodata_end;
6992 } else if (strstr(str_file, rom_file_str) != NULL) {
6993 temp->rom_raw_sstr = raw_fmts;
6994 temp->rom_ramstart = ramstart;
6995 temp->rom_rodata_start = rodata_start;
6996 temp->rom_rodata_end = rodata_end;
6999 filp_close(filep, NULL);
7009 filp_close(filep, NULL);
7011 if (strstr(str_file, ram_file_str) != NULL) {
7012 temp->raw_sstr = NULL;
7013 } else if (strstr(str_file, rom_file_str) != NULL) {
7014 temp->rom_raw_sstr = NULL;
7019 #endif /* SHOW_LOGTRACE */
7023 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7025 dhd_info_t *dhd = NULL;
7026 struct net_device *net = NULL;
7027 char if_name[IFNAMSIZ] = {'\0'};
7028 uint32 bus_type = -1;
7029 uint32 bus_num = -1;
7030 uint32 slot_num = -1;
7031 wifi_adapter_info_t *adapter = NULL;
7033 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7034 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7037 DHD_ERROR(("%s\n", driver_target));
7038 #endif /* STBLINUX */
7039 /* will implement get_ids for DBUS later */
7040 #if defined(BCMSDIO)
7041 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7043 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7045 /* Allocate primary dhd_info */
7046 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7048 dhd = MALLOC(osh, sizeof(dhd_info_t));
7050 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7054 memset(dhd, 0, sizeof(dhd_info_t));
7055 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7057 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7060 dhd->adapter = adapter;
7062 #ifdef GET_CUSTOM_MAC_ENABLE
7063 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7064 #endif /* GET_CUSTOM_MAC_ENABLE */
7065 #ifdef CUSTOM_FORCE_NODFS_FLAG
7066 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7067 dhd->pub.force_country_change = TRUE;
7068 #endif /* CUSTOM_FORCE_NODFS_FLAG */
7069 #ifdef CUSTOM_COUNTRY_CODE
7070 get_customized_country_code(dhd->adapter,
7071 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7072 dhd->pub.dhd_cflags);
7073 #endif /* CUSTOM_COUNTRY_CODE */
7074 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7075 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7077 /* Initialize thread based operation and lock */
7078 sema_init(&dhd->sdsem, 1);
7080 /* Link to info module */
7081 dhd->pub.info = dhd;
7084 /* Link to bus module */
7086 dhd->pub.hdrlen = bus_hdrlen;
7088 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
7089 * because dhd_detech will check .info is NULL or not.
7091 if (dhd_conf_attach(&dhd->pub) != 0) {
7092 DHD_ERROR(("dhd_conf_attach failed\n"));
7095 dhd_conf_reset(&dhd->pub);
7096 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7097 dhd_conf_preinit(&dhd->pub);
7099 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
7100 * This is indeed a hack but we have to make it work properly before we have a better
7103 dhd_update_fw_nv_path(dhd);
7104 #ifndef BUILD_IN_KERNEL
7105 dhd_conf_read_config(&dhd->pub, dhd->conf_path);
7108 /* Set network interface name if it was provided as module parameter */
7109 if (iface_name[0]) {
7112 strncpy(if_name, iface_name, IFNAMSIZ);
7113 if_name[IFNAMSIZ - 1] = 0;
7114 len = strlen(if_name);
7115 ch = if_name[len - 1];
7116 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7117 strcat(if_name, "%d");
7120 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
7121 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7127 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7128 #ifdef DHD_L2_FILTER
7129 /* initialize the l2_filter_cnt */
7130 dhd->pub.l2_filter_cnt = 0;
7132 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7135 net->netdev_ops = NULL;
7138 mutex_init(&dhd->dhd_iovar_mutex);
7139 sema_init(&dhd->proto_sem, 1);
7141 #ifdef PROP_TXSTATUS
7142 spin_lock_init(&dhd->wlfc_spinlock);
7144 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7145 dhd->pub.plat_init = dhd_wlfc_plat_init;
7146 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7148 #ifdef DHD_WLFC_THREAD
7149 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7150 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7151 if (IS_ERR(dhd->pub.wlfc_thread)) {
7152 DHD_ERROR(("create wlfc thread failed\n"));
7155 wake_up_process(dhd->pub.wlfc_thread);
7157 #endif /* DHD_WLFC_THREAD */
7158 #endif /* PROP_TXSTATUS */
7160 /* Initialize other structure content */
7161 init_waitqueue_head(&dhd->ioctl_resp_wait);
7162 init_waitqueue_head(&dhd->d3ack_wait);
7163 init_waitqueue_head(&dhd->ctrl_wait);
7164 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7165 dhd->pub.dhd_bus_busy_state = 0;
7167 /* Initialize the spinlocks */
7168 spin_lock_init(&dhd->sdlock);
7169 spin_lock_init(&dhd->txqlock);
7170 spin_lock_init(&dhd->dhd_lock);
7171 spin_lock_init(&dhd->rxf_lock);
7172 #if defined(RXFRAME_THREAD)
7173 dhd->rxthread_enabled = TRUE;
7174 #endif /* defined(RXFRAME_THREAD) */
7176 #ifdef DHDTCPACK_SUPPRESS
7177 spin_lock_init(&dhd->tcpack_lock);
7178 #endif /* DHDTCPACK_SUPPRESS */
7180 /* Initialize Wakelock stuff */
7181 spin_lock_init(&dhd->wakelock_spinlock);
7182 spin_lock_init(&dhd->wakelock_evt_spinlock);
7183 DHD_OS_WAKE_LOCK_INIT(dhd);
7184 dhd->wakelock_wd_counter = 0;
7185 #ifdef CONFIG_HAS_WAKELOCK
7186 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7187 #endif /* CONFIG_HAS_WAKELOCK */
7189 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7190 mutex_init(&dhd->dhd_net_if_mutex);
7191 mutex_init(&dhd->dhd_suspend_mutex);
7193 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7195 /* Attach and link in the protocol */
7196 if (dhd_prot_attach(&dhd->pub) != 0) {
7197 DHD_ERROR(("dhd_prot_attach failed\n"));
7200 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7203 /* Attach and link in the cfg80211 */
7204 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7205 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7209 dhd_monitor_init(&dhd->pub);
7210 dhd_state |= DHD_ATTACH_STATE_CFG80211;
7213 dhd_log_dump_init(&dhd->pub);
7214 #endif /* DHD_LOG_DUMP */
7215 #if defined(WL_WIRELESS_EXT)
7216 /* Attach and link in the iw */
7217 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7218 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7219 DHD_ERROR(("wl_iw_attach failed\n"));
7222 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7224 #endif /* defined(WL_WIRELESS_EXT) */
7226 #ifdef SHOW_LOGTRACE
7227 dhd_init_logstrs_array(&dhd->event_data);
7228 dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7229 dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7230 #endif /* SHOW_LOGTRACE */
7232 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7233 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7239 /* Set up the watchdog timer */
7240 init_timer(&dhd->timer);
7241 dhd->timer.data = (ulong)dhd;
7242 dhd->timer.function = dhd_watchdog;
7243 dhd->default_wd_interval = dhd_watchdog_ms;
7245 if (dhd_watchdog_prio >= 0) {
7246 /* Initialize watchdog thread */
7247 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7248 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7253 dhd->thr_wdt_ctl.thr_pid = -1;
7256 #ifdef DHD_PCIE_RUNTIMEPM
7257 /* Setup up the runtime PM Idlecount timer */
7258 init_timer(&dhd->rpm_timer);
7259 dhd->rpm_timer.data = (ulong)dhd;
7260 dhd->rpm_timer.function = dhd_runtimepm;
7261 dhd->rpm_timer_valid = FALSE;
7263 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7264 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7265 if (dhd->thr_rpm_ctl.thr_pid < 0) {
7268 #endif /* DHD_PCIE_RUNTIMEPM */
7271 debugger_init((void *) bus);
7274 /* Set up the bottom half handler */
7275 if (dhd_dpc_prio >= 0) {
7276 /* Initialize DPC thread */
7277 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7278 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7282 /* use tasklet for dpc */
7283 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7284 dhd->thr_dpc_ctl.thr_pid = -1;
7287 if (dhd->rxthread_enabled) {
7288 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7289 /* Initialize RXF thread */
7290 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7291 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7296 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7298 #if defined(CONFIG_PM_SLEEP)
7299 if (!dhd_pm_notifier_registered) {
7300 dhd_pm_notifier_registered = TRUE;
7301 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7302 dhd->pm_notifier.priority = 10;
7303 register_pm_notifier(&dhd->pm_notifier);
7306 #endif /* CONFIG_PM_SLEEP */
7308 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7309 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7310 dhd->early_suspend.suspend = dhd_early_suspend;
7311 dhd->early_suspend.resume = dhd_late_resume;
7312 register_early_suspend(&dhd->early_suspend);
7313 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7314 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7316 #ifdef ARP_OFFLOAD_SUPPORT
7317 dhd->pend_ipaddr = 0;
7318 if (!dhd_inetaddr_notifier_registered) {
7319 dhd_inetaddr_notifier_registered = TRUE;
7320 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7322 #endif /* ARP_OFFLOAD_SUPPORT */
7324 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7325 if (!dhd_inet6addr_notifier_registered) {
7326 dhd_inet6addr_notifier_registered = TRUE;
7327 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7329 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7330 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7331 #ifdef DEBUG_CPU_FREQ
7332 dhd->new_freq = alloc_percpu(int);
7333 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7334 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7336 #ifdef DHDTCPACK_SUPPRESS
7338 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7339 #elif defined(BCMPCIE)
7340 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7342 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7343 #endif /* BCMSDIO */
7344 #endif /* DHDTCPACK_SUPPRESS */
7346 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7347 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7349 dhd_state |= DHD_ATTACH_STATE_DONE;
7350 dhd->dhd_state = dhd_state;
7353 #ifdef DHD_DEBUG_PAGEALLOC
7354 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7355 #endif /* DHD_DEBUG_PAGEALLOC */
7358 DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7360 dhd_lb_set_default_cpus(dhd);
7362 /* Initialize the CPU Masks */
7363 if (dhd_cpumasks_init(dhd) == 0) {
7365 /* Now we have the current CPU maps, run through candidacy */
7366 dhd_select_cpu_candidacy(dhd);
7369 * If we are able to initialize CPU masks, lets register to the
7370 * CPU Hotplug framework to change the CPU for each job dynamically
7371 * using candidacy algorithm.
7373 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7374 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7377 * We are unable to initialize CPU masks, so candidacy algorithm
7378 * won't run, but still Load Balancing will be honoured based
7379 * on the CPUs allocated for a given job statically during init
7381 dhd->cpu_notifier.notifier_call = NULL;
7382 DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n",
7387 DHD_LB_STATS_INIT(&dhd->pub);
7389 /* Initialize the Load Balancing Tasklets and Napi object */
7390 #if defined(DHD_LB_TXC)
7391 tasklet_init(&dhd->tx_compl_tasklet,
7392 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7393 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7394 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7395 #endif /* DHD_LB_TXC */
7397 #if defined(DHD_LB_RXC)
7398 tasklet_init(&dhd->rx_compl_tasklet,
7399 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7400 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7401 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7402 #endif /* DHD_LB_RXC */
7404 #if defined(DHD_LB_RXP)
7405 __skb_queue_head_init(&dhd->rx_pend_queue);
7406 skb_queue_head_init(&dhd->rx_napi_queue);
7408 /* Initialize the work that dispatches NAPI job to a given core */
7409 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7410 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7411 #endif /* DHD_LB_RXP */
7415 INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7417 (void)dhd_sysfs_init(dhd);
7422 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7423 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7424 __FUNCTION__, dhd_state, &dhd->pub));
7425 dhd->dhd_state = dhd_state;
7426 dhd_detach(&dhd->pub);
7427 dhd_free(&dhd->pub);
7433 #include <linux/delay.h>
7435 void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7437 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7439 schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7442 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7444 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7445 return DHD_FLAG_HOSTAP_MODE;
7446 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7447 return DHD_FLAG_P2P_MODE;
7448 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7449 return DHD_FLAG_IBSS_MODE;
7450 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7451 return DHD_FLAG_MFG_MODE;
7453 return DHD_FLAG_STA_MODE;
7456 extern int rkwifi_set_firmware(char *fw, char *nvram);
7457 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
7462 const char *fw = NULL;
7463 const char *nv = NULL;
7464 const char *conf = NULL;
7465 char firmware[100] = {0};
7466 char nvram[100] = {0};
7467 wifi_adapter_info_t *adapter = dhdinfo->adapter;
7470 /* Update firmware and nvram path. The path may be from adapter info or module parameter
7471 * The path from adapter info is used for initialization only (as it won't change).
7473 * The firmware_path/nvram_path module parameter may be changed by the system at run
7474 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
7475 * command may change dhdinfo->fw_path. As such we need to clear the path info in
7476 * module parameter after it is copied. We won't update the path until the module parameter
7477 * is changed again (first character is not '\0')
7480 /* set default firmware and nvram path for built-in type driver */
7481 // if (!dhd_download_fw_on_driverload) {
7482 rkwifi_set_firmware(firmware, nvram);
7483 #ifdef CONFIG_BCMDHD_FW_PATH
7484 fw = CONFIG_BCMDHD_FW_PATH;
7487 #endif /* CONFIG_BCMDHD_FW_PATH */
7488 #ifdef CONFIG_BCMDHD_NVRAM_PATH
7489 nv = CONFIG_BCMDHD_NVRAM_PATH;
7492 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
7495 /* check if we need to initialize the path */
7496 if (dhdinfo->fw_path[0] == '\0') {
7497 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
7498 fw = adapter->fw_path;
7501 if (dhdinfo->nv_path[0] == '\0') {
7502 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
7503 nv = adapter->nv_path;
7505 if (dhdinfo->conf_path[0] == '\0') {
7506 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
7507 conf = adapter->conf_path;
7510 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
7512 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
7514 if (firmware_path[0] != '\0')
7516 if (nvram_path[0] != '\0')
7518 if (config_path[0] != '\0')
7521 if (fw && fw[0] != '\0') {
7522 fw_len = strlen(fw);
7523 if (fw_len >= sizeof(dhdinfo->fw_path)) {
7524 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
7527 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
7528 if (dhdinfo->fw_path[fw_len-1] == '\n')
7529 dhdinfo->fw_path[fw_len-1] = '\0';
7531 if (nv && nv[0] != '\0') {
7532 nv_len = strlen(nv);
7533 if (nv_len >= sizeof(dhdinfo->nv_path)) {
7534 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
7537 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
7538 if (dhdinfo->nv_path[nv_len-1] == '\n')
7539 dhdinfo->nv_path[nv_len-1] = '\0';
7541 if (conf && conf[0] != '\0') {
7542 conf_len = strlen(conf);
7543 if (conf_len >= sizeof(dhdinfo->conf_path)) {
7544 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
7547 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
7548 if (dhdinfo->conf_path[conf_len-1] == '\n')
7549 dhdinfo->conf_path[conf_len-1] = '\0';
7553 /* clear the path in module parameter */
7554 if (dhd_download_fw_on_driverload) {
7555 firmware_path[0] = '\0';
7556 nvram_path[0] = '\0';
7557 config_path[0] = '\0';
7561 #ifndef BCMEMBEDIMAGE
7562 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
7563 if (dhdinfo->fw_path[0] == '\0') {
7564 DHD_ERROR(("firmware path not found\n"));
7567 if (dhdinfo->nv_path[0] == '\0') {
7568 DHD_ERROR(("nvram path not found\n"));
7571 if (dhdinfo->conf_path[0] == '\0') {
7572 dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
7574 #ifdef CONFIG_PATH_AUTO_SELECT
7575 dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
7577 #endif /* BCMEMBEDIMAGE */
7582 #ifdef CUSTOMER_HW4_DEBUG
7583 bool dhd_validate_chipid(dhd_pub_t *dhdp)
7585 uint chipid = dhd_bus_chip_id(dhdp);
7589 config_chipid = BCM4359_CHIP_ID;
7590 #elif defined(BCM4358_CHIP)
7591 config_chipid = BCM4358_CHIP_ID;
7592 #elif defined(BCM4354_CHIP)
7593 config_chipid = BCM4354_CHIP_ID;
7594 #elif defined(BCM4356_CHIP)
7595 config_chipid = BCM4356_CHIP_ID;
7596 #elif defined(BCM4339_CHIP)
7597 config_chipid = BCM4339_CHIP_ID;
7598 #elif defined(BCM43349_CHIP)
7599 config_chipid = BCM43349_CHIP_ID;
7600 #elif defined(BCM4335_CHIP)
7601 config_chipid = BCM4335_CHIP_ID;
7602 #elif defined(BCM43241_CHIP)
7603 config_chipid = BCM4324_CHIP_ID;
7604 #elif defined(BCM4330_CHIP)
7605 config_chipid = BCM4330_CHIP_ID;
7606 #elif defined(BCM43430_CHIP)
7607 config_chipid = BCM43430_CHIP_ID;
7608 #elif defined(BCM4334W_CHIP)
7609 config_chipid = BCM43342_CHIP_ID;
7610 #elif defined(BCM43455_CHIP)
7611 config_chipid = BCM4345_CHIP_ID;
7613 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
7614 " please add CONFIG_BCMXXXX into the Kernel and"
7615 " BCMXXXX_CHIP definition into the DHD driver\n",
7620 #endif /* BCM4354_CHIP */
7622 #if defined(BCM4359_CHIP)
7623 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
7626 #endif /* BCM4359_CHIP */
7628 return config_chipid == chipid;
7630 #endif /* CUSTOMER_HW4_DEBUG */
7633 dhd_bus_start(dhd_pub_t *dhdp)
7636 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7637 unsigned long flags;
7641 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
7643 DHD_PERIM_LOCK(dhdp);
7645 /* try to download image and nvram to the dongle */
7646 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
7647 /* Indicate FW Download has not yet done */
7648 dhd->pub.is_fw_download_done = FALSE;
7649 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
7650 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
7651 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
7652 dhd->fw_path, dhd->nv_path, dhd->conf_path);
7654 DHD_ERROR(("%s: failed to download firmware %s\n",
7655 __FUNCTION__, dhd->fw_path));
7656 DHD_PERIM_UNLOCK(dhdp);
7659 /* Indicate FW Download has succeeded */
7660 dhd->pub.is_fw_download_done = TRUE;
7662 if (dhd->pub.busstate != DHD_BUS_LOAD) {
7663 DHD_PERIM_UNLOCK(dhdp);
7667 dhd_os_sdlock(dhdp);
7669 /* Start the watchdog timer */
7670 dhd->pub.tickcnt = 0;
7671 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
7672 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
7674 /* Bring up the bus */
7675 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
7677 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
7678 dhd_os_sdunlock(dhdp);
7679 DHD_PERIM_UNLOCK(dhdp);
7682 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7683 #if defined(BCMPCIE_OOB_HOST_WAKE)
7684 dhd_os_sdunlock(dhdp);
7685 #endif /* BCMPCIE_OOB_HOST_WAKE */
7686 /* Host registration for OOB interrupt */
7687 if (dhd_bus_oob_intr_register(dhdp)) {
7688 /* deactivate timer and wait for the handler to finish */
7689 #if !defined(BCMPCIE_OOB_HOST_WAKE)
7690 DHD_GENERAL_LOCK(&dhd->pub, flags);
7691 dhd->wd_timer_valid = FALSE;
7692 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7693 del_timer_sync(&dhd->timer);
7695 dhd_os_sdunlock(dhdp);
7696 #endif /* !BCMPCIE_OOB_HOST_WAKE */
7697 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7698 DHD_PERIM_UNLOCK(dhdp);
7699 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7700 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
7704 #if defined(BCMPCIE_OOB_HOST_WAKE)
7705 dhd_os_sdlock(dhdp);
7706 dhd_bus_oob_intr_set(dhdp, TRUE);
7708 /* Enable oob at firmware */
7709 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7710 #endif /* BCMPCIE_OOB_HOST_WAKE */
7711 #elif defined(FORCE_WOWLAN)
7712 /* Enable oob at firmware */
7713 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7715 #ifdef PCIE_FULL_DONGLE
7717 /* max_h2d_rings includes H2D common rings */
7718 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
7720 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
7722 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
7723 dhd_os_sdunlock(dhdp);
7724 DHD_PERIM_UNLOCK(dhdp);
7728 #endif /* PCIE_FULL_DONGLE */
7730 /* Do protocol initialization necessary for IOCTL/IOVAR */
7731 #ifdef PCIE_FULL_DONGLE
7732 dhd_os_sdunlock(dhdp);
7733 #endif /* PCIE_FULL_DONGLE */
7734 ret = dhd_prot_init(&dhd->pub);
7735 if (unlikely(ret) != BCME_OK) {
7736 DHD_PERIM_UNLOCK(dhdp);
7737 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7740 #ifdef PCIE_FULL_DONGLE
7741 dhd_os_sdlock(dhdp);
7742 #endif /* PCIE_FULL_DONGLE */
7744 /* If bus is not ready, can't come up */
7745 if (dhd->pub.busstate != DHD_BUS_DATA) {
7746 DHD_GENERAL_LOCK(&dhd->pub, flags);
7747 dhd->wd_timer_valid = FALSE;
7748 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7749 del_timer_sync(&dhd->timer);
7750 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
7751 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7752 dhd_os_sdunlock(dhdp);
7753 DHD_PERIM_UNLOCK(dhdp);
7754 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7758 dhd_os_sdunlock(dhdp);
7760 /* Bus is ready, query any dongle information */
7761 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7762 DHD_GENERAL_LOCK(&dhd->pub, flags);
7763 dhd->wd_timer_valid = FALSE;
7764 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7765 del_timer_sync(&dhd->timer);
7766 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
7767 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7768 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7769 DHD_PERIM_UNLOCK(dhdp);
7773 #ifdef ARP_OFFLOAD_SUPPORT
7774 if (dhd->pend_ipaddr) {
7775 #ifdef AOE_IP_ALIAS_SUPPORT
7776 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
7777 #endif /* AOE_IP_ALIAS_SUPPORT */
7778 dhd->pend_ipaddr = 0;
7780 #endif /* ARP_OFFLOAD_SUPPORT */
7782 DHD_PERIM_UNLOCK(dhdp);
7787 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
7789 char iovbuf[WLC_IOCTL_SMLEN];
7790 uint32 tdls = tdls_on;
7792 uint32 tdls_auto_op = 0;
7793 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
7794 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
7795 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
7797 if (!FW_SUPPORTED(dhd, tdls))
7800 if (dhd->tdls_enable == tdls_on)
7802 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
7803 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7804 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
7807 dhd->tdls_enable = tdls_on;
7810 tdls_auto_op = auto_on;
7811 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
7812 iovbuf, sizeof(iovbuf));
7813 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7814 sizeof(iovbuf), TRUE, 0)) < 0) {
7815 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
7820 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
7821 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
7822 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7823 sizeof(iovbuf), TRUE, 0)) < 0) {
7824 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
7827 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
7828 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7829 sizeof(iovbuf), TRUE, 0)) < 0) {
7830 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
7833 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
7834 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7835 sizeof(iovbuf), TRUE, 0)) < 0) {
7836 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
7845 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7847 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7850 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7856 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7858 char iovbuf[WLC_IOCTL_SMLEN];
7860 bool auto_on = false;
7861 uint32 mode = wfd_mode;
7863 #ifdef ENABLE_TDLS_AUTO_MODE
7871 #endif /* ENABLE_TDLS_AUTO_MODE */
7872 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7874 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7879 bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7880 iovbuf, sizeof(iovbuf));
7881 if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7882 sizeof(iovbuf), TRUE, 0)) < 0) &&
7883 (ret != BCME_UNSUPPORTED)) {
7884 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
7888 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7890 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7894 dhd->tdls_mode = mode;
7897 #ifdef PCIE_FULL_DONGLE
7898 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
7900 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7901 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
7902 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
7903 tdls_peer_node_t *new = NULL, *prev = NULL;
7905 uint8 sa[ETHER_ADDR_LEN];
7906 int ifidx = dhd_net2idx(dhd, dev);
7908 if (ifidx == DHD_BAD_IF)
7911 dhdif = dhd->iflist[ifidx];
7912 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
7915 while (cur != NULL) {
7916 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
7917 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
7918 __FUNCTION__, __LINE__));
7924 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
7926 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
7929 memcpy(new->addr, da, ETHER_ADDR_LEN);
7930 new->next = dhdp->peer_tbl.node;
7931 dhdp->peer_tbl.node = new;
7932 dhdp->peer_tbl.tdls_peer_count++;
7935 while (cur != NULL) {
7936 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
7937 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
7939 prev->next = cur->next;
7941 dhdp->peer_tbl.node = cur->next;
7942 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
7943 dhdp->peer_tbl.tdls_peer_count--;
7949 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
7952 #endif /* PCIE_FULL_DONGLE */
7955 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
7960 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
7962 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
7963 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
7968 #if !defined(AP) && defined(WLP2P)
7969 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
7970 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
7971 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
7972 * would still be named as fw_bcmdhd_apsta.
7975 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
7978 char buf[WLC_IOCTL_SMLEN];
7979 bool mchan_supported = FALSE;
7980 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
7981 * test mode, that means we only will use the mode as it is
7983 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
7985 if (FW_SUPPORTED(dhd, vsdb)) {
7986 mchan_supported = TRUE;
7988 if (!FW_SUPPORTED(dhd, p2p)) {
7989 DHD_TRACE(("Chip does not support p2p\n"));
7992 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
7993 memset(buf, 0, sizeof(buf));
7994 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
7995 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
7997 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
8001 /* By default, chip supports single chan concurrency,
8002 * now lets check for mchan
8004 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
8005 if (mchan_supported)
8006 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
8007 if (FW_SUPPORTED(dhd, rsdb)) {
8008 ret |= DHD_FLAG_RSDB_MODE;
8010 if (FW_SUPPORTED(dhd, mp2p)) {
8011 ret |= DHD_FLAG_MP2P_MODE;
8013 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
8017 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
8025 #ifdef SUPPORT_AP_POWERSAVE
8026 #define RXCHAIN_PWRSAVE_PPS 10
8027 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
8028 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
8029 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8032 int32 pps = RXCHAIN_PWRSAVE_PPS;
8033 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8034 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8037 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8038 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8039 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8040 DHD_ERROR(("Failed to enable AP power save\n"));
8042 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8043 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8044 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8045 DHD_ERROR(("Failed to set pps\n"));
8047 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8048 4, iovbuf, sizeof(iovbuf));
8049 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8050 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8051 DHD_ERROR(("Failed to set quiet time\n"));
8053 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8054 4, iovbuf, sizeof(iovbuf));
8055 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8056 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8057 DHD_ERROR(("Failed to set stas assoc check\n"));
8060 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8061 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8062 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8063 DHD_ERROR(("Failed to disable AP power save\n"));
8069 #endif /* SUPPORT_AP_POWERSAVE */
8073 dhd_preinit_ioctls(dhd_pub_t *dhd)
8076 char eventmask[WL_EVENTING_MASK_LEN];
8077 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
8078 uint32 buf_key_b4_m4 = 1;
8083 eventmsgs_ext_t *eventmask_msg = NULL;
8084 char* iov_buf = NULL;
8086 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8087 uint32 ampdu_ba_wsize = 0;
8089 #if defined(CUSTOM_AMPDU_MPDU)
8090 int32 ampdu_mpdu = 0;
8092 #if defined(CUSTOM_AMPDU_RELEASE)
8093 int32 ampdu_release = 0;
8095 #if defined(CUSTOM_AMSDU_AGGSF)
8096 int32 amsdu_aggsf = 0;
8098 #ifdef SUPPORT_SENSORHUB
8099 int32 shub_enable = 0;
8100 #endif /* SUPPORT_SENSORHUB */
8101 #if defined(BCMSDIO)
8102 #ifdef PROP_TXSTATUS
8103 int wlfc_enable = TRUE;
8105 uint32 hostreorder = 1;
8107 #endif /* DISABLE_11N */
8108 #endif /* PROP_TXSTATUS */
8110 #ifdef PCIE_FULL_DONGLE
8111 uint32 wl_ap_isolate;
8112 #endif /* PCIE_FULL_DONGLE */
8114 #if defined(BCMSDIO)
8115 /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8116 uint32 frameburst = 0;
8118 uint32 frameburst = 1;
8119 #endif /* BCMSDIO */
8121 #ifdef DHD_ENABLE_LPC
8123 #endif /* DHD_ENABLE_LPC */
8124 uint power_mode = PM_FAST;
8125 #if defined(BCMSDIO)
8126 uint32 dongle_align = DHD_SDALIGN;
8127 uint32 glom = CUSTOM_GLOM_SETTING;
8128 #endif /* defined(BCMSDIO) */
8129 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8132 uint bcn_timeout = dhd->conf->bcn_timeout;
8133 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8134 uint32 bcn_li_bcn = 1;
8135 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8136 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8137 #if defined(ARP_OFFLOAD_SUPPORT)
8140 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8141 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8142 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8143 char buf[WLC_IOCTL_SMLEN];
8145 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8148 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8149 int roam_scan_period[2] = {10, WLC_BAND_ALL};
8150 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8151 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8152 int roam_fullscan_period = 60;
8153 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8154 int roam_fullscan_period = 120;
8155 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8157 #ifdef DISABLE_BUILTIN_ROAM
8159 #endif /* DISABLE_BUILTIN_ROAM */
8160 #endif /* ROAM_ENABLE */
8165 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8166 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8167 struct ether_addr p2p_ea;
8169 #ifdef SOFTAP_UAPSD_OFF
8170 uint32 wme_apsd = 0;
8171 #endif /* SOFTAP_UAPSD_OFF */
8172 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8173 uint32 apsta = 1; /* Enable APSTA mode */
8174 #elif defined(SOFTAP_AND_GC)
8177 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8178 #ifdef GET_CUSTOM_MAC_ENABLE
8179 struct ether_addr ea_addr;
8180 #endif /* GET_CUSTOM_MAC_ENABLE */
8184 #endif /* DISABLE_11N */
8188 #endif /* USE_WL_TXBF */
8189 #if defined(PROP_TXSTATUS)
8190 #ifdef USE_WFA_CERT_CONF
8192 #endif /* USE_WFA_CERT_CONF */
8193 #endif /* PROP_TXSTATUS */
8194 #ifdef CUSTOM_PSPRETEND_THR
8195 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8197 uint32 rsdb_mode = 0;
8198 #ifdef ENABLE_TEMP_THROTTLING
8199 wl_temp_control_t temp_control;
8200 #endif /* ENABLE_TEMP_THROTTLING */
8201 #ifdef DISABLE_PRUNED_SCAN
8202 uint32 scan_features = 0;
8203 #endif /* DISABLE_PRUNED_SCAN */
8204 #ifdef CUSTOM_EVENT_PM_WAKE
8205 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8206 #endif /* CUSTOM_EVENT_PM_WAKE */
8207 #ifdef PKT_FILTER_SUPPORT
8208 dhd_pkt_filter_enable = TRUE;
8209 #endif /* PKT_FILTER_SUPPORT */
8211 dhd->tdls_enable = FALSE;
8212 dhd_tdls_set_mode(dhd, false);
8214 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8215 DHD_TRACE(("Enter %s\n", __FUNCTION__));
8217 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
8218 #ifdef DHDTCPACK_SUPPRESS
8219 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8220 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8224 #ifdef CUSTOMER_HW4_DEBUG
8225 if (!dhd_validate_chipid(dhd)) {
8226 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
8227 __FUNCTION__, dhd_bus_chip_id(dhd)));
8228 #ifndef SUPPORT_MULTIPLE_CHIPS
8231 #endif /* !SUPPORT_MULTIPLE_CHIPS */
8233 #endif /* CUSTOMER_HW4_DEBUG */
8234 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8235 (op_mode == DHD_FLAG_MFG_MODE)) {
8236 #ifdef DHD_PCIE_RUNTIMEPM
8237 /* Disable RuntimePM in mfg mode */
8238 DHD_DISABLE_RUNTIME_PM(dhd);
8239 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
8240 #endif /* DHD_PCIE_RUNTIME_PM */
8241 /* Check and adjust IOCTL response timeout for Manufactring firmware */
8242 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
8243 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
8246 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8247 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8249 #ifdef GET_CUSTOM_MAC_ENABLE
8250 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
8252 memset(buf, 0, sizeof(buf));
8253 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8254 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8256 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8257 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
8261 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
8263 #endif /* GET_CUSTOM_MAC_ENABLE */
8264 /* Get the default device MAC address directly from firmware */
8265 memset(buf, 0, sizeof(buf));
8266 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8267 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8269 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
8273 /* Update public MAC address after reading from Firmware */
8274 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
8276 #ifdef GET_CUSTOM_MAC_ENABLE
8278 #endif /* GET_CUSTOM_MAC_ENABLE */
8280 /* get a capabilities from firmware */
8282 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8283 memset(dhd->fw_capabilities, 0, cap_buf_size);
8284 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8285 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8286 (cap_buf_size - 1), FALSE, 0)) < 0)
8288 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8289 __FUNCTION__, ret));
8293 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8294 dhd->fw_capabilities[0] = ' ';
8295 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8296 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8299 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8300 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8301 #ifdef SET_RANDOM_MAC_SOFTAP
8303 #endif /* SET_RANDOM_MAC_SOFTAP */
8304 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8305 #if defined(ARP_OFFLOAD_SUPPORT)
8308 #ifdef PKT_FILTER_SUPPORT
8309 dhd_pkt_filter_enable = FALSE;
8311 #ifdef SET_RANDOM_MAC_SOFTAP
8312 SRANDOM32((uint)jiffies);
8313 rand_mac = RANDOM32();
8314 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
8315 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8316 iovbuf[2] = (unsigned char)vendor_oui;
8317 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8318 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8319 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8321 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8322 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8324 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
8326 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8327 #endif /* SET_RANDOM_MAC_SOFTAP */
8328 #if !defined(AP) && defined(WL_CFG80211)
8329 /* Turn off MPC in AP mode */
8330 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8331 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8332 sizeof(iovbuf), TRUE, 0)) < 0) {
8333 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
8336 #ifdef USE_DYNAMIC_F2_BLKSIZE
8337 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8338 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8339 #ifdef SUPPORT_AP_POWERSAVE
8340 dhd_set_ap_powersave(dhd, 0, TRUE);
8341 #endif /* SUPPORT_AP_POWERSAVE */
8342 #ifdef SOFTAP_UAPSD_OFF
8343 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8344 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8345 sizeof(iovbuf), TRUE, 0)) < 0) {
8346 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8347 __FUNCTION__, ret));
8349 #endif /* SOFTAP_UAPSD_OFF */
8350 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8351 (op_mode == DHD_FLAG_MFG_MODE)) {
8352 #if defined(ARP_OFFLOAD_SUPPORT)
8354 #endif /* ARP_OFFLOAD_SUPPORT */
8355 #ifdef PKT_FILTER_SUPPORT
8356 dhd_pkt_filter_enable = FALSE;
8357 #endif /* PKT_FILTER_SUPPORT */
8358 dhd->op_mode = DHD_FLAG_MFG_MODE;
8359 #ifdef USE_DYNAMIC_F2_BLKSIZE
8360 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8361 #endif /* USE_DYNAMIC_F2_BLKSIZE */
8362 if (FW_SUPPORTED(dhd, rsdb)) {
8364 bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8365 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8366 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8367 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
8368 __FUNCTION__, ret));
8372 uint32 concurrent_mode = 0;
8373 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8374 (op_mode == DHD_FLAG_P2P_MODE)) {
8375 #if defined(ARP_OFFLOAD_SUPPORT)
8378 #ifdef PKT_FILTER_SUPPORT
8379 dhd_pkt_filter_enable = FALSE;
8381 dhd->op_mode = DHD_FLAG_P2P_MODE;
8382 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8383 (op_mode == DHD_FLAG_IBSS_MODE)) {
8384 dhd->op_mode = DHD_FLAG_IBSS_MODE;
8386 dhd->op_mode = DHD_FLAG_STA_MODE;
8387 #if !defined(AP) && defined(WLP2P)
8388 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8389 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8390 #if defined(ARP_OFFLOAD_SUPPORT)
8393 dhd->op_mode |= concurrent_mode;
8396 /* Check if we are enabling p2p */
8397 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8398 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8399 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8400 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8401 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8404 #if defined(SOFTAP_AND_GC)
8405 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8406 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8407 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8410 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8411 ETHER_SET_LOCALADDR(&p2p_ea);
8412 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8413 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8414 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8415 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8416 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8418 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8422 (void)concurrent_mode;
8426 #ifdef RSDB_MODE_FROM_FILE
8427 (void)dhd_rsdb_mode_from_file(dhd);
8428 #endif /* RSDB_MODE_FROM_FILE */
8430 #ifdef DISABLE_PRUNED_SCAN
8431 if (FW_SUPPORTED(dhd, rsdb)) {
8432 memset(iovbuf, 0, sizeof(iovbuf));
8433 bcm_mkiovar("scan_features", (char *)&scan_features,
8434 4, iovbuf, sizeof(iovbuf));
8435 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8436 iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8437 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
8438 __FUNCTION__, ret));
8440 memcpy(&scan_features, iovbuf, 4);
8441 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8442 memset(iovbuf, 0, sizeof(iovbuf));
8443 bcm_mkiovar("scan_features", (char *)&scan_features,
8444 4, iovbuf, sizeof(iovbuf));
8445 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8446 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8447 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
8448 __FUNCTION__, ret));
8452 #endif /* DISABLE_PRUNED_SCAN */
8454 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8455 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8456 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8457 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8458 dhd->info->rxthread_enabled = FALSE;
8460 dhd->info->rxthread_enabled = TRUE;
8462 /* Set Country code */
8463 if (dhd->dhd_cspec.ccode[0] != 0) {
8464 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8465 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8466 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8467 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8468 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8470 dhd_conf_set_country(dhd);
8471 dhd_conf_fix_country(dhd);
8473 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8476 /* Set Listen Interval */
8477 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8478 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8479 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8481 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8482 #ifdef USE_WFA_CERT_CONF
8483 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8484 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8486 #endif /* USE_WFA_CERT_CONF */
8487 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
8488 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8489 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8490 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
8491 #if defined(ROAM_ENABLE)
8492 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8493 sizeof(roam_trigger), TRUE, 0)) < 0)
8494 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8495 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8496 sizeof(roam_scan_period), TRUE, 0)) < 0)
8497 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
8498 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
8499 sizeof(roam_delta), TRUE, 0)) < 0)
8500 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8501 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8502 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8503 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8504 #endif /* ROAM_ENABLE */
8505 dhd_conf_set_roam(dhd);
8507 #ifdef CUSTOM_EVENT_PM_WAKE
8508 bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8509 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8510 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8512 #endif /* CUSTOM_EVENT_PM_WAKE */
8514 #ifdef ENABLE_TDLS_AUTO_MODE
8515 /* by default TDLS on and auto mode on */
8516 _dhd_tdls_enable(dhd, true, true, NULL);
8518 /* by default TDLS on and auto mode off */
8519 _dhd_tdls_enable(dhd, true, false, NULL);
8520 #endif /* ENABLE_TDLS_AUTO_MODE */
8523 #ifdef DHD_ENABLE_LPC
8525 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8526 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8527 sizeof(iovbuf), TRUE, 0)) < 0) {
8528 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
8530 if (ret == BCME_NOTDOWN) {
8532 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8533 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8534 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8536 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8537 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8538 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8541 #endif /* DHD_ENABLE_LPC */
8542 dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
8544 /* Set PowerSave mode */
8545 if (dhd->conf->pm >= 0)
8546 power_mode = dhd->conf->pm;
8547 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
8549 #if defined(BCMSDIO)
8550 /* Match Host and Dongle rx alignment */
8551 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8552 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8554 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8555 /* enable credall to reduce the chance of no bus credit happened. */
8556 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8557 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8560 #ifdef USE_WFA_CERT_CONF
8561 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8562 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8564 #endif /* USE_WFA_CERT_CONF */
8565 if (glom != DEFAULT_GLOM_VALUE) {
8566 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8567 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8568 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8570 #endif /* defined(BCMSDIO) */
8572 /* Setup timeout if Beacons are lost and roam is off to report link down */
8573 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8574 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8575 /* Setup assoc_retry_max count to reconnect target AP in dongle */
8576 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8577 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8578 #if defined(AP) && !defined(WLP2P)
8579 /* Turn off MPC in AP mode */
8580 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8581 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8582 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8583 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8584 #endif /* defined(AP) && !defined(WLP2P) */
8585 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
8586 dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
8587 dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8588 dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8589 dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8590 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
8591 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
8592 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
8593 dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8595 #ifdef MIMO_ANT_SETTING
8596 dhd_sel_ant_from_file(dhd);
8597 #endif /* MIMO_ANT_SETTING */
8600 if (ap_fw_loaded == TRUE) {
8601 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8605 #if defined(KEEP_ALIVE)
8607 /* Set Keep Alive : be sure to use FW with -keepalive */
8611 if (ap_fw_loaded == FALSE)
8613 if (!(dhd->op_mode &
8614 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8615 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
8616 DHD_ERROR(("%s set keeplive failed %d\n",
8617 __FUNCTION__, res));
8620 #endif /* defined(KEEP_ALIVE) */
8623 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8624 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8625 sizeof(iovbuf), TRUE, 0)) < 0) {
8626 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8628 #endif /* USE_WL_TXBF */
8629 dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
8631 #ifdef USE_WFA_CERT_CONF
8632 #ifdef USE_WL_FRAMEBURST
8633 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8634 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8636 #endif /* USE_WL_FRAMEBURST */
8637 #ifdef DISABLE_FRAMEBURST_VSDB
8638 g_frameburst = frameburst;
8639 #endif /* DISABLE_FRAMEBURST_VSDB */
8640 #endif /* USE_WFA_CERT_CONF */
8641 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
8642 /* Disable Framebursting for SofAP */
8643 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8646 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8647 /* Set frameburst to value */
8648 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8649 sizeof(frameburst), TRUE, 0)) < 0) {
8650 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
8652 dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
8653 #if defined(CUSTOM_AMPDU_BA_WSIZE)
8654 /* Set ampdu ba wsize to 64 or 16 */
8655 #ifdef CUSTOM_AMPDU_BA_WSIZE
8656 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8658 if (ampdu_ba_wsize != 0) {
8659 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8660 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8661 sizeof(iovbuf), TRUE, 0)) < 0) {
8662 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
8663 __FUNCTION__, ampdu_ba_wsize, ret));
8667 dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
8669 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8670 if (iov_buf == NULL) {
8671 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8675 #ifdef ENABLE_TEMP_THROTTLING
8676 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8677 memset(&temp_control, 0, sizeof(temp_control));
8678 temp_control.enable = 1;
8679 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8680 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8681 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8682 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8684 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
8685 __FUNCTION__, ret));
8688 #endif /* ENABLE_TEMP_THROTTLING */
8689 #if defined(CUSTOM_AMPDU_MPDU)
8690 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8691 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8692 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
8693 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8694 sizeof(iovbuf), TRUE, 0)) < 0) {
8695 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
8696 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8699 #endif /* CUSTOM_AMPDU_MPDU */
8701 #if defined(CUSTOM_AMPDU_RELEASE)
8702 ampdu_release = CUSTOM_AMPDU_RELEASE;
8703 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8704 bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
8705 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8706 sizeof(iovbuf), TRUE, 0)) < 0) {
8707 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
8708 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8711 #endif /* CUSTOM_AMPDU_RELEASE */
8713 #if defined(CUSTOM_AMSDU_AGGSF)
8714 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8715 if (amsdu_aggsf != 0) {
8716 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8717 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8719 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8720 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8723 #endif /* CUSTOM_AMSDU_AGGSF */
8725 #ifdef CUSTOM_PSPRETEND_THR
8726 /* Turn off MPC in AP mode */
8727 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8728 iovbuf, sizeof(iovbuf));
8729 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8730 sizeof(iovbuf), TRUE, 0)) < 0) {
8731 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
8732 __FUNCTION__, ret));
8736 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8737 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8738 sizeof(iovbuf), TRUE, 0)) < 0) {
8739 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8742 /* Read event_msgs mask */
8743 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8744 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8745 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8748 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8750 /* Setup event_msgs */
8751 setbit(eventmask, WLC_E_SET_SSID);
8752 setbit(eventmask, WLC_E_PRUNE);
8753 setbit(eventmask, WLC_E_AUTH);
8754 setbit(eventmask, WLC_E_AUTH_IND);
8755 setbit(eventmask, WLC_E_ASSOC);
8756 setbit(eventmask, WLC_E_REASSOC);
8757 setbit(eventmask, WLC_E_REASSOC_IND);
8758 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8759 setbit(eventmask, WLC_E_DEAUTH);
8760 setbit(eventmask, WLC_E_DEAUTH_IND);
8761 setbit(eventmask, WLC_E_DISASSOC_IND);
8762 setbit(eventmask, WLC_E_DISASSOC);
8763 setbit(eventmask, WLC_E_JOIN);
8764 setbit(eventmask, WLC_E_START);
8765 setbit(eventmask, WLC_E_ASSOC_IND);
8766 setbit(eventmask, WLC_E_PSK_SUP);
8767 setbit(eventmask, WLC_E_LINK);
8768 setbit(eventmask, WLC_E_MIC_ERROR);
8769 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8770 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8772 setbit(eventmask, WLC_E_PMKID_CACHE);
8773 setbit(eventmask, WLC_E_TXFAIL);
8775 setbit(eventmask, WLC_E_JOIN_START);
8776 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8778 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8781 setbit(eventmask, WLC_E_HTSFSYNC);
8782 #endif /* WLMEDIA_HTSF */
8784 setbit(eventmask, WLC_E_PFN_NET_FOUND);
8785 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8786 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8787 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8788 #endif /* PNO_SUPPORT */
8789 /* enable dongle roaming event */
8790 setbit(eventmask, WLC_E_ROAM);
8791 setbit(eventmask, WLC_E_BSSID);
8793 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8796 setbit(eventmask, WLC_E_ESCAN_RESULT);
8797 setbit(eventmask, WLC_E_AP_STARTED);
8798 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8799 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8800 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8802 #endif /* WL_CFG80211 */
8804 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8805 if (dhd_logtrace_from_file(dhd)) {
8806 setbit(eventmask, WLC_E_TRACE);
8808 clrbit(eventmask, WLC_E_TRACE);
8810 #elif defined(SHOW_LOGTRACE)
8811 setbit(eventmask, WLC_E_TRACE);
8813 clrbit(eventmask, WLC_E_TRACE);
8814 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8816 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8817 #ifdef DHD_LOSSLESS_ROAMING
8818 setbit(eventmask, WLC_E_ROAM_PREP);
8820 #ifdef CUSTOM_EVENT_PM_WAKE
8821 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8822 #endif /* CUSTOM_EVENT_PM_WAKE */
8823 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8824 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8825 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8827 /* Write updated Event mask */
8828 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8829 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8830 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8834 /* make up event mask ext message iovar for event larger than 128 */
8835 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8836 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8837 if (eventmask_msg == NULL) {
8838 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8842 bzero(eventmask_msg, msglen);
8843 eventmask_msg->ver = EVENTMSGS_VER;
8844 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8846 /* Read event_msgs_ext mask */
8847 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8848 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8849 if (ret2 == 0) { /* event_msgs_ext must be supported */
8850 bcopy(iov_buf, eventmask_msg, msglen);
8851 #ifdef GSCAN_SUPPORT
8852 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8853 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8854 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8855 #endif /* GSCAN_SUPPORT */
8856 #ifdef BT_WIFI_HANDOVER
8857 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8858 #endif /* BT_WIFI_HANDOVER */
8860 /* Write updated Event mask */
8861 eventmask_msg->ver = EVENTMSGS_VER;
8862 eventmask_msg->command = EVENTMSGS_SET_MASK;
8863 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8864 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8865 msglen, iov_buf, WLC_IOCTL_SMLEN);
8866 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8867 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8868 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8871 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8872 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
8873 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
8874 __FUNCTION__, ret2));
8876 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
8881 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
8882 sizeof(scan_assoc_time), TRUE, 0);
8883 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
8884 sizeof(scan_unassoc_time), TRUE, 0);
8885 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
8886 sizeof(scan_passive_time), TRUE, 0);
8888 #ifdef ARP_OFFLOAD_SUPPORT
8889 /* Set and enable ARP offload feature for STA only */
8891 if (arpoe && !ap_fw_loaded)
8896 dhd_arp_offload_enable(dhd, TRUE);
8897 dhd_arp_offload_set(dhd, dhd_arp_mode);
8899 dhd_arp_offload_enable(dhd, FALSE);
8900 dhd_arp_offload_set(dhd, 0);
8902 dhd_arp_enable = arpoe;
8903 #endif /* ARP_OFFLOAD_SUPPORT */
8905 #ifdef PKT_FILTER_SUPPORT
8906 /* Setup default defintions for pktfilter , enable in suspend */
8907 if (dhd_master_mode) {
8908 dhd->pktfilter_count = 6;
8909 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
8910 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
8911 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8912 /* apply APP pktfilter */
8913 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
8915 /* Setup filter to allow only unicast */
8916 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
8918 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
8919 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
8921 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
8922 dhd->pktfilter_count = 4;
8923 /* Setup filter to block broadcast and NAT Keepalive packets */
8924 /* discard all broadcast packets */
8925 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
8926 /* discard NAT Keepalive packets */
8927 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
8928 /* discard NAT Keepalive packets */
8929 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
8930 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8931 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
8933 dhd_conf_discard_pkt_filter(dhd);
8934 dhd_conf_add_pkt_filter(dhd);
8938 dhd_enable_packet_filter(0, dhd);
8940 #endif /* defined(SOFTAP) */
8941 dhd_set_packet_filter(dhd);
8942 #endif /* PKT_FILTER_SUPPORT */
8944 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
8945 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8946 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
8947 #endif /* DISABLE_11N */
8949 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
8950 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
8951 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8952 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8953 /* query for 'ver' to get version info from firmware */
8954 memset(buf, 0, sizeof(buf));
8956 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
8957 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
8958 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
8960 bcmstrtok(&ptr, "\n", 0);
8961 /* Print fw version info */
8962 DHD_ERROR(("Firmware version = %s\n", buf));
8963 strncpy(fw_version, buf, FW_VER_STR_LEN);
8964 dhd_set_version_info(dhd, buf);
8965 #ifdef WRITE_WLANINFO
8966 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
8967 #endif /* WRITE_WLANINFO */
8970 #if defined(BCMSDIO)
8971 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
8972 // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
8973 dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE);
8974 #endif /* defined(BCMSDIO) */
8976 dhd_conf_set_disable_proptx(dhd);
8977 #if defined(BCMSDIO)
8978 #ifdef PROP_TXSTATUS
8979 if (disable_proptx ||
8980 #ifdef PROP_TXSTATUS_VSDB
8981 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
8982 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
8983 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
8984 #endif /* PROP_TXSTATUS_VSDB */
8986 wlfc_enable = FALSE;
8989 #ifdef USE_WFA_CERT_CONF
8990 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
8991 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
8992 wlfc_enable = proptx;
8994 #endif /* USE_WFA_CERT_CONF */
8997 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8998 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
8999 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9000 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9001 if (ret2 != BCME_UNSUPPORTED)
9004 if (ret == BCME_NOTDOWN) {
9006 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9007 sizeof(wl_down), TRUE, 0);
9008 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9009 __FUNCTION__, ret2, hostreorder));
9011 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9012 iovbuf, sizeof(iovbuf));
9013 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9014 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9015 if (ret2 != BCME_UNSUPPORTED)
9018 if (ret2 != BCME_OK)
9021 #endif /* DISABLE_11N */
9027 else if (hostreorder)
9028 dhd_wlfc_hostreorder_init(dhd);
9029 #endif /* DISABLE_11N */
9031 #endif /* PROP_TXSTATUS */
9032 #endif /* BCMSDIO || BCMBUS */
9033 #ifdef PCIE_FULL_DONGLE
9034 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9035 if (FW_SUPPORTED(dhd, ap)) {
9036 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9037 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9038 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9039 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9041 #endif /* PCIE_FULL_DONGLE */
9043 if (!dhd->pno_state) {
9048 dhd_interworking_enable(dhd);
9051 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9054 #ifdef SUPPORT_SENSORHUB
9055 bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9056 if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9058 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
9059 __FUNCTION__, ret));
9060 dhd->info->shub_enable = 0;
9062 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9063 dhd->info->shub_enable = shub_enable;
9064 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9065 __FUNCTION__, dhd->info->shub_enable));
9067 #endif /* SUPPORT_SENSORHUB */
9071 kfree(eventmask_msg);
9080 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9082 char buf[strlen(name) + 1 + cmd_len];
9083 int len = sizeof(buf);
9087 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9089 memset(&ioc, 0, sizeof(ioc));
9091 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9096 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9097 if (!set && ret >= 0)
9098 memcpy(cmd_buf, buf, cmd_len);
9103 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9105 struct dhd_info *dhd = dhdp->info;
9106 struct net_device *dev = NULL;
9108 ASSERT(dhd && dhd->iflist[ifidx]);
9109 dev = dhd->iflist[ifidx]->net;
9112 if (netif_running(dev)) {
9113 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9114 return BCME_NOTDOWN;
9117 #define DHD_MIN_MTU 1500
9118 #define DHD_MAX_MTU 1752
9120 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9121 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9129 #ifdef ARP_OFFLOAD_SUPPORT
9130 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
9132 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9134 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9138 bzero(ipv4_buf, sizeof(ipv4_buf));
9140 /* display what we've got */
9141 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9142 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9144 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9146 /* now we saved hoste_ip table, clr it in the dongle AOE */
9147 dhd_aoe_hostip_clr(dhd_pub, idx);
9150 DHD_ERROR(("%s failed\n", __FUNCTION__));
9154 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9155 if (add && (ipv4_buf[i] == 0)) {
9157 add = FALSE; /* added ipa to local table */
9158 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9160 } else if (ipv4_buf[i] == ipa) {
9162 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9163 __FUNCTION__, ipa, i));
9166 if (ipv4_buf[i] != 0) {
9167 /* add back host_ip entries from our local cache */
9168 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9169 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9170 __FUNCTION__, ipv4_buf[i], i));
9174 /* see the resulting hostip table */
9175 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9176 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9177 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9182 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
9183 * whenever there is an event related to an IP address.
9184 * ptr : kernel provided pointer to IP address that has changed
9186 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
9187 unsigned long event,
9190 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
9196 if (!dhd_arp_enable)
9198 if (!ifa || !(ifa->ifa_dev->dev))
9201 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9202 /* Filter notifications meant for non Broadcom devices */
9203 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
9204 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
9205 #if defined(WL_ENABLE_P2P_IF)
9206 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
9207 #endif /* WL_ENABLE_P2P_IF */
9210 #endif /* LINUX_VERSION_CODE */
9212 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
9216 dhd_pub = &dhd->pub;
9218 if (dhd_pub->arp_version == 1) {
9221 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
9222 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
9225 if (idx < DHD_MAX_IFS) {
9226 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
9227 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
9229 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
9236 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
9237 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9239 if (dhd->pub.busstate != DHD_BUS_DATA) {
9240 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
9241 if (dhd->pend_ipaddr) {
9242 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
9243 __FUNCTION__, dhd->pend_ipaddr));
9245 dhd->pend_ipaddr = ifa->ifa_address;
9249 #ifdef AOE_IP_ALIAS_SUPPORT
9250 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
9252 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
9253 #endif /* AOE_IP_ALIAS_SUPPORT */
9257 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
9258 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9259 dhd->pend_ipaddr = 0;
9260 #ifdef AOE_IP_ALIAS_SUPPORT
9261 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
9263 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
9265 dhd_aoe_hostip_clr(&dhd->pub, idx);
9266 dhd_aoe_arp_clr(&dhd->pub, idx);
9267 #endif /* AOE_IP_ALIAS_SUPPORT */
9271 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
9272 __func__, ifa->ifa_label, event));
9277 #endif /* ARP_OFFLOAD_SUPPORT */
9279 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9280 /* Neighbor Discovery Offload: defered handler */
9282 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9284 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9285 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
9288 if (event != DHD_WQ_WORK_IPV6_NDO) {
9289 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9294 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9299 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9303 if (ndo_work->if_idx) {
9304 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
9308 switch (ndo_work->event) {
9310 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9311 ret = dhd_ndo_enable(pub, TRUE);
9313 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9316 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9318 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9319 __FUNCTION__, ret));
9323 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9324 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9326 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9327 __FUNCTION__, ret));
9331 ret = dhd_ndo_enable(pub, FALSE);
9333 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9338 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9342 /* free ndo_work. alloced while scheduling the work */
9349 * Neighbor Discovery Offload: Called when an interface
9350 * is assigned with ipv6 address.
9351 * Handles only primary interface
9353 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9354 unsigned long event,
9359 struct inet6_ifaddr *inet6_ifa = ptr;
9360 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9361 struct ipv6_work_info_t *ndo_info;
9362 int idx = 0; /* REVISIT */
9364 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9365 /* Filter notifications meant for non Broadcom devices */
9366 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9369 #endif /* LINUX_VERSION_CODE */
9371 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9375 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9377 dhd_pub = &dhd->pub;
9379 if (!FW_SUPPORTED(dhd_pub, ndoe))
9382 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9384 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9388 ndo_info->event = event;
9389 ndo_info->if_idx = idx;
9390 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9392 /* defer the work to thread as it may block kernel */
9393 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9394 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9397 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9400 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
9402 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9404 struct net_device *net = NULL;
9406 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
9408 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
9410 ASSERT(dhd && dhd->iflist[ifidx]);
9411 ifp = dhd->iflist[ifidx];
9413 ASSERT(net && (ifp->idx == ifidx));
9415 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9417 net->get_stats = dhd_get_stats;
9418 net->do_ioctl = dhd_ioctl_entry;
9419 net->hard_start_xmit = dhd_start_xmit;
9420 net->set_mac_address = dhd_set_mac_address;
9421 net->set_multicast_list = dhd_set_multicast_list;
9422 net->open = net->stop = NULL;
9424 ASSERT(!net->netdev_ops);
9425 net->netdev_ops = &dhd_ops_virt;
9426 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
9428 /* Ok, link into the network layer... */
9431 * device functions for the primary interface only
9433 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9434 net->open = dhd_open;
9435 net->stop = dhd_stop;
9437 net->netdev_ops = &dhd_ops_pri;
9438 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
9439 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
9440 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
9443 * We have to use the primary MAC for virtual interfaces
9445 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
9447 * Android sets the locally administered bit to indicate that this is a
9448 * portable hotspot. This will not work in simultaneous AP/STA mode,
9449 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
9451 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
9453 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
9454 __func__, net->name));
9455 temp_addr[0] |= 0x02;
9459 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
9460 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
9461 net->ethtool_ops = &dhd_ethtool_ops;
9462 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
9464 #if defined(WL_WIRELESS_EXT)
9465 #if WIRELESS_EXT < 19
9466 net->get_wireless_stats = dhd_get_wireless_stats;
9467 #endif /* WIRELESS_EXT < 19 */
9468 #if WIRELESS_EXT > 12
9469 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
9470 #endif /* WIRELESS_EXT > 12 */
9471 #endif /* defined(WL_WIRELESS_EXT) */
9473 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
9475 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
9478 printf("%s\n", dhd_version);
9481 err = register_netdev(net);
9483 err = register_netdevice(net);
9486 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
9492 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
9493 #if defined(CUSTOMER_HW4_DEBUG)
9494 MAC2STRDBG(dhd->pub.mac.octet));
9496 MAC2STRDBG(net->dev_addr));
9497 #endif /* CUSTOMER_HW4_DEBUG */
9499 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
9500 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
9503 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
9504 KERNEL_VERSION(2, 6, 27))))
9507 up(&dhd_registration_sem);
9508 #endif /* BCMLXSDMMC */
9509 if (!dhd_download_fw_on_driverload) {
9511 wl_terminate_event_handler();
9512 #endif /* WL_CFG80211 */
9513 #if defined(DHD_LB) && defined(DHD_LB_RXP)
9514 __skb_queue_purge(&dhd->rx_pend_queue);
9515 #endif /* DHD_LB && DHD_LB_RXP */
9516 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
9517 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
9518 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
9519 dhd_net_bus_devreset(net, TRUE);
9521 dhd_net_bus_suspend(net);
9522 #endif /* BCMLXSDMMC */
9523 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
9526 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
9530 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
9533 net->netdev_ops = NULL;
9539 dhd_bus_detach(dhd_pub_t *dhdp)
9543 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9546 dhd = (dhd_info_t *)dhdp->info;
9550 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9551 * calling stop again will cuase SD read/write errors.
9553 if (dhd->pub.busstate != DHD_BUS_DOWN) {
9554 /* Stop the protocol module */
9555 dhd_prot_stop(&dhd->pub);
9557 /* Stop the bus module */
9558 dhd_bus_stop(dhd->pub.bus, TRUE);
9561 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9562 dhd_bus_oob_intr_unregister(dhdp);
9569 void dhd_detach(dhd_pub_t *dhdp)
9572 unsigned long flags;
9573 int timer_valid = FALSE;
9574 struct net_device *dev;
9579 dhd = (dhd_info_t *)dhdp->info;
9583 dev = dhd->iflist[0]->net;
9587 if (dev->flags & IFF_UP) {
9588 /* If IFF_UP is still up, it indicates that
9589 * "ifconfig wlan0 down" hasn't been called.
9590 * So invoke dev_close explicitly here to
9591 * bring down the interface.
9593 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
9599 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
9602 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
9603 /* Give sufficient time for threads to start running in case
9604 * dhd_attach() has failed
9609 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9610 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9612 #ifdef PROP_TXSTATUS
9613 #ifdef DHD_WLFC_THREAD
9614 if (dhd->pub.wlfc_thread) {
9615 kthread_stop(dhd->pub.wlfc_thread);
9616 dhdp->wlfc_thread_go = TRUE;
9617 wake_up_interruptible(&dhdp->wlfc_wqhead);
9619 dhd->pub.wlfc_thread = NULL;
9620 #endif /* DHD_WLFC_THREAD */
9621 #endif /* PROP_TXSTATUS */
9623 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
9625 dhd_bus_detach(dhdp);
9627 if (is_reboot == SYS_RESTART) {
9628 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
9629 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
9630 dhdpcie_bus_clock_stop(dhdp->bus);
9631 wifi_platform_set_power(dhd_wifi_platdata->adapters,
9632 FALSE, WIFI_TURNOFF_DELAY);
9635 #endif /* BCMPCIE */
9636 #ifndef PCIE_FULL_DONGLE
9638 dhd_prot_detach(dhdp);
9642 #ifdef ARP_OFFLOAD_SUPPORT
9643 if (dhd_inetaddr_notifier_registered) {
9644 dhd_inetaddr_notifier_registered = FALSE;
9645 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
9647 #endif /* ARP_OFFLOAD_SUPPORT */
9648 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9649 if (dhd_inet6addr_notifier_registered) {
9650 dhd_inet6addr_notifier_registered = FALSE;
9651 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
9653 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9654 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9655 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
9656 if (dhd->early_suspend.suspend)
9657 unregister_early_suspend(&dhd->early_suspend);
9659 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9661 #if defined(WL_WIRELESS_EXT)
9662 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
9663 /* Detatch and unlink in the iw */
9666 #endif /* defined(WL_WIRELESS_EXT) */
9668 /* delete all interfaces, start with virtual */
9669 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
9673 /* Cleanup virtual interfaces */
9674 dhd_net_if_lock_local(dhd);
9675 for (i = 1; i < DHD_MAX_IFS; i++) {
9677 dhd_remove_if(&dhd->pub, i, TRUE);
9679 dhd_net_if_unlock_local(dhd);
9681 /* delete primary interface 0 */
9682 ifp = dhd->iflist[0];
9685 if (ifp && ifp->net) {
9689 /* in unregister_netdev case, the interface gets freed by net->destructor
9690 * (which is set to free_netdev)
9692 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
9693 free_netdev(ifp->net);
9696 custom_rps_map_clear(ifp->net->_rx);
9697 #endif /* SET_RPS_CPUS */
9698 netif_tx_disable(ifp->net);
9699 unregister_netdev(ifp->net);
9703 dhd_wmf_cleanup(dhdp, 0);
9704 #endif /* DHD_WMF */
9705 #ifdef DHD_L2_FILTER
9706 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
9707 NULL, FALSE, dhdp->tickcnt);
9708 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
9709 ifp->phnd_arp_table = NULL;
9710 #endif /* DHD_L2_FILTER */
9712 dhd_if_del_sta_list(ifp);
9714 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
9715 dhd->iflist[0] = NULL;
9719 /* Clear the watchdog timer */
9720 DHD_GENERAL_LOCK(&dhd->pub, flags);
9721 timer_valid = dhd->wd_timer_valid;
9722 dhd->wd_timer_valid = FALSE;
9723 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9725 del_timer_sync(&dhd->timer);
9726 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9728 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
9729 #ifdef DHD_PCIE_RUNTIMEPM
9730 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
9731 PROC_STOP(&dhd->thr_rpm_ctl);
9733 #endif /* DHD_PCIE_RUNTIMEPM */
9734 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
9735 PROC_STOP(&dhd->thr_wdt_ctl);
9738 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
9739 PROC_STOP(&dhd->thr_rxf_ctl);
9742 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
9743 PROC_STOP(&dhd->thr_dpc_ctl);
9745 tasklet_kill(&dhd->tasklet);
9747 __skb_queue_purge(&dhd->rx_pend_queue);
9748 #endif /* DHD_LB_RXP */
9753 /* Kill the Load Balancing Tasklets */
9754 #if defined(DHD_LB_TXC)
9755 tasklet_disable(&dhd->tx_compl_tasklet);
9756 tasklet_kill(&dhd->tx_compl_tasklet);
9757 #endif /* DHD_LB_TXC */
9758 #if defined(DHD_LB_RXC)
9759 tasklet_disable(&dhd->rx_compl_tasklet);
9760 tasklet_kill(&dhd->rx_compl_tasklet);
9761 #endif /* DHD_LB_RXC */
9762 if (dhd->cpu_notifier.notifier_call != NULL)
9763 unregister_cpu_notifier(&dhd->cpu_notifier);
9764 dhd_cpumasks_deinit(dhd);
9768 dhd_log_dump_deinit(&dhd->pub);
9769 #endif /* DHD_LOG_DUMP */
9771 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
9772 wl_cfg80211_detach(NULL);
9773 dhd_monitor_uninit();
9776 /* free deferred work queue */
9777 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
9778 dhd->dhd_deferred_wq = NULL;
9780 #ifdef SHOW_LOGTRACE
9781 if (dhd->event_data.fmts)
9782 kfree(dhd->event_data.fmts);
9783 if (dhd->event_data.raw_fmts)
9784 kfree(dhd->event_data.raw_fmts);
9785 if (dhd->event_data.raw_sstr)
9786 kfree(dhd->event_data.raw_sstr);
9787 #endif /* SHOW_LOGTRACE */
9790 if (dhdp->pno_state)
9791 dhd_pno_deinit(dhdp);
9793 #if defined(CONFIG_PM_SLEEP)
9794 if (dhd_pm_notifier_registered) {
9795 unregister_pm_notifier(&dhd->pm_notifier);
9796 dhd_pm_notifier_registered = FALSE;
9798 #endif /* CONFIG_PM_SLEEP */
9800 #ifdef DEBUG_CPU_FREQ
9802 free_percpu(dhd->new_freq);
9803 dhd->new_freq = NULL;
9804 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9806 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
9807 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
9808 #ifdef CONFIG_HAS_WAKELOCK
9809 dhd->wakelock_wd_counter = 0;
9810 wake_lock_destroy(&dhd->wl_wdwake);
9811 #endif /* CONFIG_HAS_WAKELOCK */
9812 DHD_OS_WAKE_LOCK_DESTROY(dhd);
9817 #ifdef DHDTCPACK_SUPPRESS
9818 /* This will free all MEM allocated for TCPACK SUPPRESS */
9819 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9820 #endif /* DHDTCPACK_SUPPRESS */
9822 #ifdef PCIE_FULL_DONGLE
9823 dhd_flow_rings_deinit(dhdp);
9825 dhd_prot_detach(dhdp);
9829 dhd_sysfs_exit(dhd);
9830 dhd->pub.is_fw_download_done = FALSE;
9831 dhd_conf_detach(dhdp);
9836 dhd_free(dhd_pub_t *dhdp)
9839 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9843 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9844 if (dhdp->reorder_bufs[i]) {
9845 reorder_info_t *ptr;
9846 uint32 buf_size = sizeof(struct reorder_info);
9848 ptr = dhdp->reorder_bufs[i];
9850 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9851 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9852 i, ptr->max_idx, buf_size));
9854 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9855 dhdp->reorder_bufs[i] = NULL;
9859 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
9861 dhd = (dhd_info_t *)dhdp->info;
9862 if (dhdp->soc_ram) {
9863 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9864 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9866 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9867 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9868 dhdp->soc_ram = NULL;
9870 #ifdef CACHE_FW_IMAGES
9871 if (dhdp->cached_fw) {
9872 MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
9873 dhdp->cached_fw = NULL;
9876 if (dhdp->cached_nvram) {
9877 MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
9878 dhdp->cached_nvram = NULL;
9881 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
9883 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
9884 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
9890 dhd_clear(dhd_pub_t *dhdp)
9892 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9896 #ifdef DHDTCPACK_SUPPRESS
9897 /* Clean up timer/data structure for any remaining/pending packet or timer. */
9898 dhd_tcpack_info_tbl_clean(dhdp);
9899 #endif /* DHDTCPACK_SUPPRESS */
9900 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9901 if (dhdp->reorder_bufs[i]) {
9902 reorder_info_t *ptr;
9903 uint32 buf_size = sizeof(struct reorder_info);
9905 ptr = dhdp->reorder_bufs[i];
9907 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9908 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9909 i, ptr->max_idx, buf_size));
9911 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9912 dhdp->reorder_bufs[i] = NULL;
9916 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
9918 if (dhdp->soc_ram) {
9919 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9920 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9922 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9923 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9924 dhdp->soc_ram = NULL;
9930 dhd_module_cleanup(void)
9932 printf("%s: Enter\n", __FUNCTION__);
9934 dhd_bus_unregister();
9938 dhd_wifi_platform_unregister_drv();
9939 printf("%s: Exit\n", __FUNCTION__);
9943 dhd_module_exit(void)
9946 dhd_module_cleanup();
9947 unregister_reboot_notifier(&dhd_reboot_notifier);
9951 dhd_module_init(void)
9956 printf("%s: in\n", __FUNCTION__);
9960 DHD_PERIM_RADIO_INIT();
9963 if (firmware_path[0] != '\0') {
9964 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
9965 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
9968 if (nvram_path[0] != '\0') {
9969 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
9970 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
9974 err = dhd_wifi_platform_register_drv();
9976 register_reboot_notifier(&dhd_reboot_notifier);
9980 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
9981 __FUNCTION__, retry));
9982 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
9983 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
9984 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
9985 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
9990 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
9992 if (!dhd_download_fw_on_driverload) {
9993 dhd_driver_init_done = TRUE;
9997 printf("%s: Exit err=%d\n", __FUNCTION__, err);
10002 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
10004 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
10005 if (code == SYS_RESTART) {
10008 #endif /* BCMPCIE */
10010 return NOTIFY_DONE;
10013 static int wifi_init_thread(void *data)
10020 int __init rockchip_wifi_init_module_rkwifi(void)
10022 struct task_struct *kthread = NULL;
10024 kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
10025 if (IS_ERR(kthread))
10026 pr_err("create wifi_init_thread failed.\n");
10031 void __exit rockchip_wifi_exit_module_rkwifi(void)
10036 late_initcall(rockchip_wifi_init_module_rkwifi);
10037 module_exit(rockchip_wifi_exit_module_rkwifi);
10040 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
10041 #if defined(CONFIG_DEFERRED_INITCALLS)
10042 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
10043 defined(CONFIG_ARCH_MSM8996)
10044 deferred_module_init_sync(dhd_module_init);
10046 deferred_module_init(dhd_module_init);
10047 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
10048 * CONFIG_ARCH_MSM8996
10050 #elif defined(USE_LATE_INITCALL_SYNC)
10051 late_initcall_sync(dhd_module_init);
10053 late_initcall(dhd_module_init);
10054 #endif /* USE_LATE_INITCALL_SYNC */
10056 module_init(dhd_module_init);
10057 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
10059 module_exit(dhd_module_exit);
10063 * OS specific functions required to implement DHD driver in OS independent way
10066 dhd_os_proto_block(dhd_pub_t *pub)
10068 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10071 DHD_PERIM_UNLOCK(pub);
10073 down(&dhd->proto_sem);
10075 DHD_PERIM_LOCK(pub);
10083 dhd_os_proto_unblock(dhd_pub_t *pub)
10085 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10088 up(&dhd->proto_sem);
10096 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10098 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10101 mutex_lock(&dhd->dhd_iovar_mutex);
10106 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10108 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10111 mutex_unlock(&dhd->dhd_iovar_mutex);
10116 dhd_os_get_ioctl_resp_timeout(void)
10118 return ((unsigned int)dhd_ioctl_timeout_msec);
10122 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10124 dhd_ioctl_timeout_msec = (int)timeout_msec;
10128 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
10130 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10133 /* Convert timeout in millsecond to jiffies */
10134 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10135 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10137 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10140 DHD_PERIM_UNLOCK(pub);
10142 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
10144 DHD_PERIM_LOCK(pub);
10150 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10152 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10154 wake_up(&dhd->ioctl_resp_wait);
10159 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
10161 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10164 /* Convert timeout in millsecond to jiffies */
10165 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10166 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10168 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10171 DHD_PERIM_UNLOCK(pub);
10173 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
10175 DHD_PERIM_LOCK(pub);
10181 dhd_os_d3ack_wake(dhd_pub_t *pub)
10183 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10185 wake_up(&dhd->d3ack_wait);
10190 dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
10192 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10195 /* Wait for bus usage contexts to gracefully exit within some timeout value
10196 * Set time out to little higher than dhd_ioctl_timeout_msec,
10197 * so that IOCTL timeout should not get affected.
10199 /* Convert timeout in millsecond to jiffies */
10200 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10201 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
10203 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
10206 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
10212 dhd_os_busbusy_wake(dhd_pub_t *pub)
10214 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10215 /* Call wmb() to make sure before waking up the other event value gets updated */
10217 wake_up(&dhd->dhd_bus_busy_state_wait);
10222 dhd_os_wd_timer_extend(void *bus, bool extend)
10224 dhd_pub_t *pub = bus;
10225 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10228 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10230 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10235 dhd_os_wd_timer(void *bus, uint wdtick)
10237 dhd_pub_t *pub = bus;
10238 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10239 unsigned long flags;
10241 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10244 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
10248 DHD_OS_WD_WAKE_LOCK(pub);
10249 DHD_GENERAL_LOCK(pub, flags);
10251 /* don't start the wd until fw is loaded */
10252 if (pub->busstate == DHD_BUS_DOWN) {
10253 DHD_GENERAL_UNLOCK(pub, flags);
10255 DHD_OS_WD_WAKE_UNLOCK(pub);
10259 /* Totally stop the timer */
10260 if (!wdtick && dhd->wd_timer_valid == TRUE) {
10261 dhd->wd_timer_valid = FALSE;
10262 DHD_GENERAL_UNLOCK(pub, flags);
10263 del_timer_sync(&dhd->timer);
10264 DHD_OS_WD_WAKE_UNLOCK(pub);
10269 DHD_OS_WD_WAKE_LOCK(pub);
10270 dhd_watchdog_ms = (uint)wdtick;
10271 /* Re arm the timer, at last watchdog period */
10272 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
10273 dhd->wd_timer_valid = TRUE;
10275 DHD_GENERAL_UNLOCK(pub, flags);
10276 DHD_OS_WD_WAKE_UNLOCK(pub);
10279 #ifdef DHD_PCIE_RUNTIMEPM
10281 dhd_os_runtimepm_timer(void *bus, uint tick)
10283 dhd_pub_t *pub = bus;
10284 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10285 unsigned long flags;
10287 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10290 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
10294 DHD_GENERAL_LOCK(pub, flags);
10296 /* don't start the RPM until fw is loaded */
10297 if (pub->busstate == DHD_BUS_DOWN ||
10298 pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
10299 DHD_GENERAL_UNLOCK(pub, flags);
10303 /* If tick is non-zero, the request is to start the timer */
10305 /* Start the timer only if its not already running */
10306 if (dhd->rpm_timer_valid == FALSE) {
10307 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
10308 dhd->rpm_timer_valid = TRUE;
10311 /* tick is zero, we have to stop the timer */
10312 /* Stop the timer only if its running, otherwise we don't have to do anything */
10313 if (dhd->rpm_timer_valid == TRUE) {
10314 dhd->rpm_timer_valid = FALSE;
10315 DHD_GENERAL_UNLOCK(pub, flags);
10316 del_timer_sync(&dhd->rpm_timer);
10317 /* we have already released the lock, so just go to exit */
10322 DHD_GENERAL_UNLOCK(pub, flags);
10328 #endif /* DHD_PCIE_RUNTIMEPM */
10331 dhd_os_open_image(char *filename)
10336 fp = filp_open(filename, O_RDONLY, 0);
10338 * 2.6.11 (FC4) supports filp_open() but later revs don't?
10340 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10348 if (!S_ISREG(file_inode(fp)->i_mode)) {
10349 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10354 size = i_size_read(file_inode(fp));
10356 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10361 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10368 dhd_os_get_image_block(char *buf, int len, void *image)
10370 struct file *fp = (struct file *)image;
10377 size = i_size_read(file_inode(fp));
10378 rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
10380 if (len >= size && size != rdlen) {
10385 fp->f_pos += rdlen;
10391 dhd_os_close_image(void *image)
10394 filp_close((struct file *)image, NULL);
10398 dhd_os_sdlock(dhd_pub_t *pub)
10402 dhd = (dhd_info_t *)(pub->info);
10404 if (dhd_dpc_prio >= 0)
10407 spin_lock_bh(&dhd->sdlock);
10411 dhd_os_sdunlock(dhd_pub_t *pub)
10415 dhd = (dhd_info_t *)(pub->info);
10417 if (dhd_dpc_prio >= 0)
10420 spin_unlock_bh(&dhd->sdlock);
10424 dhd_os_sdlock_txq(dhd_pub_t *pub)
10428 dhd = (dhd_info_t *)(pub->info);
10429 spin_lock_bh(&dhd->txqlock);
10433 dhd_os_sdunlock_txq(dhd_pub_t *pub)
10437 dhd = (dhd_info_t *)(pub->info);
10438 spin_unlock_bh(&dhd->txqlock);
10442 dhd_os_sdlock_rxq(dhd_pub_t *pub)
10447 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
10452 dhd_os_rxflock(dhd_pub_t *pub)
10456 dhd = (dhd_info_t *)(pub->info);
10457 spin_lock_bh(&dhd->rxf_lock);
10462 dhd_os_rxfunlock(dhd_pub_t *pub)
10466 dhd = (dhd_info_t *)(pub->info);
10467 spin_unlock_bh(&dhd->rxf_lock);
10470 #ifdef DHDTCPACK_SUPPRESS
10472 dhd_os_tcpacklock(dhd_pub_t *pub)
10475 unsigned long flags = 0;
10477 dhd = (dhd_info_t *)(pub->info);
10481 spin_lock_bh(&dhd->tcpack_lock);
10483 spin_lock_irqsave(&dhd->tcpack_lock, flags);
10484 #endif /* BCMSDIO */
10491 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
10496 BCM_REFERENCE(flags);
10497 #endif /* BCMSDIO */
10499 dhd = (dhd_info_t *)(pub->info);
10503 spin_lock_bh(&dhd->tcpack_lock);
10505 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
10506 #endif /* BCMSDIO */
10509 #endif /* DHDTCPACK_SUPPRESS */
10511 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10514 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10516 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10517 if (buf == NULL && kmalloc_if_fail)
10518 buf = kmalloc(size, flags);
10523 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
10527 #if defined(WL_WIRELESS_EXT)
10528 struct iw_statistics *
10529 dhd_get_wireless_stats(struct net_device *dev)
10532 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10534 if (!dhd->pub.up) {
10538 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
10541 return &dhd->iw.wstats;
10545 #endif /* defined(WL_WIRELESS_EXT) */
10548 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
10549 wl_event_msg_t *event, void **data)
10552 ASSERT(dhd != NULL);
10554 #ifdef SHOW_LOGTRACE
10555 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
10557 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
10558 #endif /* SHOW_LOGTRACE */
10560 if (bcmerror != BCME_OK)
10563 #if defined(WL_WIRELESS_EXT)
10564 if (event->bsscfgidx == 0) {
10566 * Wireless ext is on primary interface only
10569 ASSERT(dhd->iflist[*ifidx] != NULL);
10570 ASSERT(dhd->iflist[*ifidx]->net != NULL);
10572 if (dhd->iflist[*ifidx]->net) {
10573 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
10576 #endif /* defined(WL_WIRELESS_EXT) */
10579 ASSERT(dhd->iflist[*ifidx] != NULL);
10580 ASSERT(dhd->iflist[*ifidx]->net != NULL);
10581 if (dhd->iflist[*ifidx]->net)
10582 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
10583 #endif /* defined(WL_CFG80211) */
10588 /* send up locally generated event */
10590 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
10592 switch (ntoh32(event->event_type)) {
10599 #ifdef LOG_INTO_TCPDUMP
10601 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
10603 struct sk_buff *p, *skb;
10610 struct ether_header eth;
10612 pktlen = sizeof(eth) + data_len;
10615 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
10616 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
10618 bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
10619 bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
10620 ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
10621 eth.ether_type = hton16(ETHER_TYPE_BRCM);
10623 bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
10624 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
10625 skb = PKTTONATIVE(dhdp->osh, p);
10626 skb_data = skb->data;
10629 ifidx = dhd_ifname2idx(dhd, "wlan0");
10630 ifp = dhd->iflist[ifidx];
10632 ifp = dhd->iflist[0];
10635 skb->dev = ifp->net;
10636 skb->protocol = eth_type_trans(skb, skb->dev);
10637 skb->data = skb_data;
10640 /* Strip header, count, deliver upward */
10641 skb_pull(skb, ETH_HLEN);
10643 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
10644 __FUNCTION__, __LINE__);
10645 /* Send the packet */
10646 if (in_interrupt()) {
10653 /* Could not allocate a sk_buf */
10654 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
10657 #endif /* LOG_INTO_TCPDUMP */
10659 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
10661 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10662 struct dhd_info *dhdinfo = dhd->info;
10664 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10665 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
10667 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
10668 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
10670 dhd_os_sdunlock(dhd);
10671 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
10672 dhd_os_sdlock(dhd);
10673 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
10677 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
10679 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10680 struct dhd_info *dhdinfo = dhd->info;
10681 if (waitqueue_active(&dhdinfo->ctrl_wait))
10682 wake_up(&dhdinfo->ctrl_wait);
10687 #if defined(BCMSDIO) || defined(BCMPCIE)
10689 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
10693 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10695 if (flag == TRUE) {
10696 /* Issue wl down command before resetting the chip */
10697 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
10698 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
10700 #ifdef PROP_TXSTATUS
10701 if (dhd->pub.wlfc_enabled)
10702 dhd_wlfc_deinit(&dhd->pub);
10703 #endif /* PROP_TXSTATUS */
10705 if (dhd->pub.pno_state)
10706 dhd_pno_deinit(&dhd->pub);
10712 dhd_update_fw_nv_path(dhd);
10713 /* update firmware and nvram path to sdio bus */
10714 dhd_bus_update_fw_nv_path(dhd->pub.bus,
10715 dhd->fw_path, dhd->nv_path, dhd->conf_path);
10717 #endif /* BCMSDIO */
10719 ret = dhd_bus_devreset(&dhd->pub, flag);
10721 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
10730 dhd_net_bus_suspend(struct net_device *dev)
10732 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10733 return dhd_bus_suspend(&dhd->pub);
10737 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10739 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10740 return dhd_bus_resume(&dhd->pub, stage);
10743 #endif /* BCMSDIO */
10744 #endif /* BCMSDIO || BCMPCIE */
10746 int net_os_set_suspend_disable(struct net_device *dev, int val)
10748 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10752 ret = dhd->pub.suspend_disable_flag;
10753 dhd->pub.suspend_disable_flag = val;
10758 int net_os_set_suspend(struct net_device *dev, int val, int force)
10761 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10764 #ifdef CONFIG_MACH_UNIVERSAL7420
10765 #endif /* CONFIG_MACH_UNIVERSAL7420 */
10766 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
10767 ret = dhd_set_suspend(val, &dhd->pub);
10769 ret = dhd_suspend_resume_helper(dhd, val, force);
10772 wl_cfg80211_update_power_mode(dev);
10778 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10780 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10783 dhd->pub.suspend_bcn_li_dtim = val;
10788 #ifdef PKT_FILTER_SUPPORT
10789 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
10791 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
10794 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10795 char *filterp = NULL;
10799 if (!dhd_master_mode)
10800 add_remove = !add_remove;
10801 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
10802 if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
10804 if (num >= dhd->pub.pktfilter_count)
10807 case DHD_BROADCAST_FILTER_NUM:
10808 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
10811 case DHD_MULTICAST4_FILTER_NUM:
10812 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
10815 case DHD_MULTICAST6_FILTER_NUM:
10816 filterp = "103 0 0 0 0xFFFF 0x3333";
10819 case DHD_MDNS_FILTER_NUM:
10820 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
10829 dhd->pub.pktfilter[num] = filterp;
10830 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
10831 } else { /* Delete filter */
10832 if (dhd->pub.pktfilter[num] != NULL) {
10833 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
10834 dhd->pub.pktfilter[num] = NULL;
10838 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
10841 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
10846 /* Packet filtering is set only if we still in early-suspend and
10847 * we need either to turn it ON or turn it OFF
10848 * We can always turn it OFF in case of early-suspend, but we turn it
10849 * back ON only if suspend_disable_flag was not set
10851 if (dhdp && dhdp->up) {
10852 if (dhdp->in_suspend) {
10853 if (!val || (val && !dhdp->suspend_disable_flag))
10854 dhd_enable_packet_filter(val, dhdp);
10860 /* function to enable/disable packet for Network device */
10861 int net_os_enable_packet_filter(struct net_device *dev, int val)
10863 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10865 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
10866 return dhd_os_enable_packet_filter(&dhd->pub, val);
10868 #endif /* PKT_FILTER_SUPPORT */
10871 dhd_dev_init_ioctl(struct net_device *dev)
10873 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10876 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
10884 dhd_dev_get_feature_set(struct net_device *dev)
10886 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
10887 dhd_pub_t *dhd = (&ptr->pub);
10888 int feature_set = 0;
10890 #ifdef DYNAMIC_SWOOB_DURATION
10891 #ifndef CUSTOM_INTR_WIDTH
10892 #define CUSTOM_INTR_WIDTH 100
10893 int intr_width = 0;
10894 #endif /* CUSTOM_INTR_WIDTH */
10895 #endif /* DYNAMIC_SWOOB_DURATION */
10897 return feature_set;
10899 if (FW_SUPPORTED(dhd, sta))
10900 feature_set |= WIFI_FEATURE_INFRA;
10901 if (FW_SUPPORTED(dhd, dualband))
10902 feature_set |= WIFI_FEATURE_INFRA_5G;
10903 if (FW_SUPPORTED(dhd, p2p))
10904 feature_set |= WIFI_FEATURE_P2P;
10905 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
10906 feature_set |= WIFI_FEATURE_SOFT_AP;
10907 if (FW_SUPPORTED(dhd, tdls))
10908 feature_set |= WIFI_FEATURE_TDLS;
10909 if (FW_SUPPORTED(dhd, vsdb))
10910 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
10911 if (FW_SUPPORTED(dhd, nan)) {
10912 feature_set |= WIFI_FEATURE_NAN;
10913 /* NAN is essentail for d2d rtt */
10914 if (FW_SUPPORTED(dhd, rttd2d))
10915 feature_set |= WIFI_FEATURE_D2D_RTT;
10918 feature_set |= WIFI_FEATURE_D2AP_RTT;
10919 #endif /* RTT_SUPPORT */
10920 #ifdef LINKSTAT_SUPPORT
10921 feature_set |= WIFI_FEATURE_LINKSTAT;
10922 #endif /* LINKSTAT_SUPPORT */
10923 /* Supports STA + STA always */
10924 feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
10926 if (dhd_is_pno_supported(dhd)) {
10927 feature_set |= WIFI_FEATURE_PNO;
10928 feature_set |= WIFI_FEATURE_BATCH_SCAN;
10929 #ifdef GSCAN_SUPPORT
10930 feature_set |= WIFI_FEATURE_GSCAN;
10931 #endif /* GSCAN_SUPPORT */
10933 #endif /* PNO_SUPPORT */
10935 feature_set |= WIFI_FEATURE_HOTSPOT;
10937 return feature_set;
10941 int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
10943 int feature_set_full, mem_needed;
10947 mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10948 ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
10950 DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
10955 feature_set_full = dhd_dev_get_feature_set(dev);
10957 ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
10958 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10959 (feature_set_full & WIFI_FEATURE_NAN) |
10960 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10961 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10962 (feature_set_full & WIFI_FEATURE_PNO) |
10963 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
10964 (feature_set_full & WIFI_FEATURE_GSCAN) |
10965 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
10966 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
10967 (feature_set_full & WIFI_FEATURE_EPR);
10969 ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
10970 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10971 /* Not yet verified NAN with P2P */
10972 /* (feature_set_full & WIFI_FEATURE_NAN) | */
10973 (feature_set_full & WIFI_FEATURE_P2P) |
10974 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10975 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10976 (feature_set_full & WIFI_FEATURE_EPR);
10978 ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
10979 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10980 (feature_set_full & WIFI_FEATURE_NAN) |
10981 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10982 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10983 (feature_set_full & WIFI_FEATURE_TDLS) |
10984 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
10985 (feature_set_full & WIFI_FEATURE_EPR);
10986 *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10990 #ifdef CUSTOM_FORCE_NODFS_FLAG
10992 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
10994 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10997 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
10999 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
11000 dhd->pub.force_country_change = TRUE;
11003 #endif /* CUSTOM_FORCE_NODFS_FLAG */
11005 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
11007 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
11009 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11011 return (dhd_pno_stop_for_ssid(&dhd->pub));
11013 /* Linux wrapper to call common dhd_pno_set_for_ssid */
11015 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
11016 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
11018 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11020 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
11021 pno_repeat, pno_freq_expo_max, channel_list, nchan));
11024 /* Linux wrapper to call common dhd_pno_enable */
11026 dhd_dev_pno_enable(struct net_device *dev, int enable)
11028 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11030 return (dhd_pno_enable(&dhd->pub, enable));
11033 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
11035 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
11036 struct dhd_pno_hotlist_params *hotlist_params)
11038 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11039 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
11041 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
11043 dhd_dev_pno_stop_for_batch(struct net_device *dev)
11045 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11046 return (dhd_pno_stop_for_batch(&dhd->pub));
11048 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
11050 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
11052 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11053 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
11055 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
11057 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
11059 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11060 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
11062 /* Linux wrapper to call common dhd_pno_set_mac_oui */
11064 dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
11066 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11067 return (dhd_pno_set_mac_oui(&dhd->pub, oui));
11069 #endif /* PNO_SUPPORT */
11071 #if defined(PNO_SUPPORT)
11072 #ifdef GSCAN_SUPPORT
11073 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11075 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11076 void *buf, uint8 flush)
11078 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11080 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
11083 /* Linux wrapper to call common dhd_pno_get_gscan */
11085 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11086 void *info, uint32 *len)
11088 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11090 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
11093 /* Linux wrapper to call common dhd_wait_batch_results_complete */
11095 dhd_dev_wait_batch_results_complete(struct net_device *dev)
11097 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11099 return (dhd_wait_batch_results_complete(&dhd->pub));
11102 /* Linux wrapper to call common dhd_pno_lock_batch_results */
11104 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
11106 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11108 return (dhd_pno_lock_batch_results(&dhd->pub));
11110 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
11112 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
11114 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11116 return (dhd_pno_unlock_batch_results(&dhd->pub));
11119 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
11121 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
11123 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11125 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
11128 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
11130 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
11132 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11134 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
11137 /* Linux wrapper to call common dhd_handle_swc_evt */
11139 dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
11141 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11143 return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
11146 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
11148 dhd_dev_hotlist_scan_event(struct net_device *dev,
11149 const void *data, int *send_evt_bytes, hotlist_type_t type)
11151 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11153 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
11156 /* Linux wrapper to call common dhd_process_full_gscan_result */
11158 dhd_dev_process_full_gscan_result(struct net_device *dev,
11159 const void *data, int *send_evt_bytes)
11161 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11163 return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
11167 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
11169 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11171 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
11177 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
11179 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11181 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
11184 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
11186 dhd_dev_retrieve_batch_scan(struct net_device *dev)
11188 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11190 return (dhd_retreive_batch_scan_results(&dhd->pub));
11192 #endif /* GSCAN_SUPPORT */
11195 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11197 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
11199 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11201 return (dhd_rtt_set_cfg(&dhd->pub, buf));
11204 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
11206 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11208 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
11211 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
11213 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11215 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
11218 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
11220 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11222 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
11226 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
11228 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11230 return (dhd_rtt_capability(&dhd->pub, capa));
11233 #endif /* RTT_SUPPORT */
11235 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11236 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
11239 struct net_device *dev;
11241 dhd = (dhd_info_t *)dhd_info;
11242 dev = dhd->iflist[0]->net;
11245 #if defined(WL_WIRELESS_EXT)
11246 wl_iw_send_priv_event(dev, "HANG");
11248 #if defined(WL_CFG80211)
11249 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11254 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
11255 extern dhd_pub_t *link_recovery;
11256 void dhd_host_recover_link(void)
11258 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
11259 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
11260 dhd_bus_set_linkdown(link_recovery, TRUE);
11261 dhd_os_send_hang_message(link_recovery);
11263 EXPORT_SYMBOL(dhd_host_recover_link);
11264 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11266 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11270 if (!dhdp->hang_was_sent) {
11271 dhdp->hang_was_sent = 1;
11272 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11273 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11274 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
11275 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11281 int net_os_send_hang_message(struct net_device *dev)
11283 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11287 /* Report FW problem when enabled */
11288 if (dhd->pub.hang_report) {
11289 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11290 ret = dhd_os_send_hang_message(&dhd->pub);
11292 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11295 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
11297 /* Enforce bus down to stop any future traffic */
11298 dhd->pub.busstate = DHD_BUS_DOWN;
11304 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11306 dhd_info_t *dhd = NULL;
11307 dhd_pub_t *dhdp = NULL;
11310 dhd = DHD_DEV_INFO(dev);
11315 if (!dhd || !dhdp) {
11319 reason = bcm_strtoul(string_num, NULL, 0);
11320 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11322 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11326 dhdp->hang_reason = reason;
11328 return net_os_send_hang_message(dev);
11330 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
11333 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11335 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11336 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11339 bool dhd_force_country_change(struct net_device *dev)
11341 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11343 if (dhd && dhd->pub.up)
11344 return dhd->pub.force_country_change;
11347 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
11348 wl_country_t *cspec)
11350 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11351 #ifdef CUSTOM_COUNTRY_CODE
11352 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
11353 dhd->pub.dhd_cflags);
11355 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
11356 #endif /* CUSTOM_COUNTRY_CODE */
11358 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
11360 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11361 if (dhd && dhd->pub.up) {
11362 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
11364 wl_update_wiphybands(NULL, notify);
11369 void dhd_bus_band_set(struct net_device *dev, uint band)
11371 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11372 if (dhd && dhd->pub.up) {
11374 wl_update_wiphybands(NULL, true);
11379 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11381 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11383 if (!fw || fw[0] == '\0')
11386 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11387 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11389 #if defined(SOFTAP)
11390 if (strstr(fw, "apsta") != NULL) {
11391 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11392 ap_fw_loaded = TRUE;
11394 DHD_INFO(("GOT STA FIRMWARE\n"));
11395 ap_fw_loaded = FALSE;
11401 void dhd_net_if_lock(struct net_device *dev)
11403 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11404 dhd_net_if_lock_local(dhd);
11407 void dhd_net_if_unlock(struct net_device *dev)
11409 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11410 dhd_net_if_unlock_local(dhd);
11413 static void dhd_net_if_lock_local(dhd_info_t *dhd)
11415 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11417 mutex_lock(&dhd->dhd_net_if_mutex);
11421 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
11423 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11425 mutex_unlock(&dhd->dhd_net_if_mutex);
11429 static void dhd_suspend_lock(dhd_pub_t *pub)
11431 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11432 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11434 mutex_lock(&dhd->dhd_suspend_mutex);
11438 static void dhd_suspend_unlock(dhd_pub_t *pub)
11440 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11441 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11443 mutex_unlock(&dhd->dhd_suspend_mutex);
11447 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11449 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11450 unsigned long flags = 0;
11453 spin_lock_irqsave(&dhd->dhd_lock, flags);
11458 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11460 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11463 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11466 /* Linux specific multipurpose spinlock API */
11468 dhd_os_spin_lock_init(osl_t *osh)
11470 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
11471 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
11472 /* and this results in kernel asserts in internal builds */
11473 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
11475 spin_lock_init(lock);
11476 return ((void *)lock);
11479 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11482 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11485 dhd_os_spin_lock(void *lock)
11487 unsigned long flags = 0;
11490 spin_lock_irqsave((spinlock_t *)lock, flags);
11495 dhd_os_spin_unlock(void *lock, unsigned long flags)
11498 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11502 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11504 return (atomic_read(&dhd->pend_8021x_cnt));
11507 #define MAX_WAIT_FOR_8021X_TX 100
11510 dhd_wait_pend8021x(struct net_device *dev)
11512 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11513 int timeout = msecs_to_jiffies(10);
11514 int ntimes = MAX_WAIT_FOR_8021X_TX;
11515 int pend = dhd_get_pend_8021x_cnt(dhd);
11517 while (ntimes && pend) {
11519 set_current_state(TASK_INTERRUPTIBLE);
11520 DHD_PERIM_UNLOCK(&dhd->pub);
11521 schedule_timeout(timeout);
11522 DHD_PERIM_LOCK(&dhd->pub);
11523 set_current_state(TASK_RUNNING);
11526 pend = dhd_get_pend_8021x_cnt(dhd);
11530 atomic_set(&dhd->pend_8021x_cnt, 0);
11531 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11538 dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11540 char *type_str = NULL;
11543 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11544 type_str = "resumed_on_timeout";
11546 case DUMP_TYPE_D3_ACK_TIMEOUT:
11547 type_str = "D3_ACK_timeout";
11549 case DUMP_TYPE_DONGLE_TRAP:
11550 type_str = "Dongle_Trap";
11552 case DUMP_TYPE_MEMORY_CORRUPTION:
11553 type_str = "Memory_Corruption";
11555 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11556 type_str = "PKTID_AUDIT_Fail";
11558 case DUMP_TYPE_SCAN_TIMEOUT:
11559 type_str = "SCAN_timeout";
11561 case DUMP_TYPE_SCAN_BUSY:
11562 type_str = "SCAN_Busy";
11564 case DUMP_TYPE_BY_SYSDUMP:
11565 type_str = "BY_SYSDUMP";
11567 case DUMP_TYPE_BY_LIVELOCK:
11568 type_str = "BY_LIVELOCK";
11570 case DUMP_TYPE_AP_LINKUP_FAILURE:
11571 type_str = "BY_AP_LINK_FAILURE";
11574 type_str = "Unknown_type";
11578 strncpy(buf, type_str, strlen(type_str));
11579 buf[strlen(type_str)] = 0;
11583 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
11586 struct file *fp = NULL;
11587 mm_segment_t old_fs;
11589 char memdump_path[128];
11590 char memdump_type[32];
11591 struct timeval curtime;
11594 /* change to KERNEL_DS address limit */
11598 /* Init file name */
11599 memset(memdump_path, 0, sizeof(memdump_path));
11600 memset(memdump_type, 0, sizeof(memdump_type));
11601 do_gettimeofday(&curtime);
11602 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
11603 #ifdef CUSTOMER_HW4_DEBUG
11604 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11605 DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
11606 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11607 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11608 #elif defined(CUSTOMER_HW2)
11609 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11610 "/data/misc/wifi/mem_dump", memdump_type,
11611 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11612 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11614 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11615 "/installmedia/mem_dump", memdump_type,
11616 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11617 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
11618 * calling BUG_ON immediately after collecting the socram dump.
11619 * So the file write operation should directly write the contents into the
11620 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
11621 * instead of appending.
11623 file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
11624 #endif /* CUSTOMER_HW4_DEBUG */
11626 /* print SOCRAM dump file path */
11627 DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));
11629 /* open file to write */
11630 fp = filp_open(memdump_path, file_mode, 0644);
11633 printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
11637 /* Write buf to file */
11638 fp->f_op->write(fp, buf, size, &pos);
11641 /* close file before return */
11643 filp_close(fp, current->files);
11645 /* restore previous address limit */
11648 /* free buf before return */
11649 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
11650 DHD_OS_PREFREE(dhd, buf, size);
11652 MFREE(dhd->osh, buf, size);
11653 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
11657 #endif /* DHD_DEBUG */
11659 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
11661 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11662 unsigned long flags;
11666 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11667 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
11668 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
11669 #ifdef CONFIG_HAS_WAKELOCK
11670 if (dhd->wakelock_rx_timeout_enable)
11671 wake_lock_timeout(&dhd->wl_rxwake,
11672 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
11673 if (dhd->wakelock_ctrl_timeout_enable)
11674 wake_lock_timeout(&dhd->wl_ctrlwake,
11675 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
11677 dhd->wakelock_rx_timeout_enable = 0;
11678 dhd->wakelock_ctrl_timeout_enable = 0;
11679 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11684 int net_os_wake_lock_timeout(struct net_device *dev)
11686 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11690 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11694 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11696 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11697 unsigned long flags;
11700 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11701 if (val > dhd->wakelock_rx_timeout_enable)
11702 dhd->wakelock_rx_timeout_enable = val;
11703 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11708 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11710 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11711 unsigned long flags;
11714 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11715 if (val > dhd->wakelock_ctrl_timeout_enable)
11716 dhd->wakelock_ctrl_timeout_enable = val;
11717 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11722 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
11724 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11725 unsigned long flags;
11728 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11729 dhd->wakelock_ctrl_timeout_enable = 0;
11730 #ifdef CONFIG_HAS_WAKELOCK
11731 if (wake_lock_active(&dhd->wl_ctrlwake))
11732 wake_unlock(&dhd->wl_ctrlwake);
11734 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11739 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11741 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11745 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11749 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11751 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11755 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11760 #if defined(DHD_TRACE_WAKE_LOCK)
11761 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11762 #include <linux/hashtable.h>
11764 #include <linux/hash.h>
11765 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11768 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11769 /* Define 2^5 = 32 bucket size hash table */
11770 DEFINE_HASHTABLE(wklock_history, 5);
11772 /* Define 2^5 = 32 bucket size hash table */
11773 struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
11774 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11776 int trace_wklock_onoff = 1;
11778 typedef enum dhd_wklock_type {
11785 struct wk_trace_record {
11786 unsigned long addr; /* Address of the instruction */
11787 dhd_wklock_t lock_type; /* lock_type */
11788 unsigned long long counter; /* counter information */
11789 struct hlist_node wklock_node; /* hash node */
11793 static struct wk_trace_record *find_wklock_entry(unsigned long addr)
11795 struct wk_trace_record *wklock_info;
11796 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11797 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
11799 struct hlist_node *entry;
11800 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
11801 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
11802 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11804 if (wklock_info->addr == addr) {
11805 return wklock_info;
11811 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11812 #define HASH_ADD(hashtable, node, key) \
11814 hash_add(hashtable, node, key); \
11817 #define HASH_ADD(hashtable, node, key) \
11819 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
11820 hlist_add_head(node, &hashtable[index]); \
11822 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11824 #define STORE_WKLOCK_RECORD(wklock_type) \
11826 struct wk_trace_record *wklock_info = NULL; \
11827 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
11828 wklock_info = find_wklock_entry(func_addr); \
11829 if (wklock_info) { \
11830 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
11831 wklock_info->counter = dhd->wakelock_counter; \
11833 wklock_info->counter++; \
11836 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
11837 if (!wklock_info) {\
11838 printk("Can't allocate wk_trace_record \n"); \
11840 wklock_info->addr = func_addr; \
11841 wklock_info->lock_type = wklock_type; \
11842 if (wklock_type == DHD_WAIVE_LOCK || \
11843 wklock_type == DHD_RESTORE_LOCK) { \
11844 wklock_info->counter = dhd->wakelock_counter; \
11846 wklock_info->counter++; \
11848 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
11853 static inline void dhd_wk_lock_rec_dump(void)
11856 struct wk_trace_record *wklock_info;
11858 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11859 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
11861 struct hlist_node *entry = NULL;
11862 int max_index = ARRAY_SIZE(wklock_history);
11863 for (bkt = 0; bkt < max_index; bkt++)
11864 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
11865 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11867 switch (wklock_info->lock_type) {
11868 case DHD_WAKE_LOCK:
11869 DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n",
11870 (void *)wklock_info->addr, wklock_info->counter));
11872 case DHD_WAKE_UNLOCK:
11873 DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
11874 (void *)wklock_info->addr, wklock_info->counter));
11876 case DHD_WAIVE_LOCK:
11877 DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n",
11878 (void *)wklock_info->addr, wklock_info->counter));
11880 case DHD_RESTORE_LOCK:
11881 DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
11882 (void *)wklock_info->addr, wklock_info->counter));
11888 static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
11890 unsigned long flags;
11891 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
11893 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11895 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11896 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11897 hash_init(wklock_history);
11899 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
11900 INIT_HLIST_HEAD(&wklock_history[i]);
11901 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11902 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11905 static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
11908 struct wk_trace_record *wklock_info;
11909 struct hlist_node *tmp;
11910 unsigned long flags;
11911 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
11912 struct hlist_node *entry = NULL;
11913 int max_index = ARRAY_SIZE(wklock_history);
11914 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11916 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11917 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11918 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
11920 for (bkt = 0; bkt < max_index; bkt++)
11921 hlist_for_each_entry_safe(wklock_info, entry, tmp,
11922 &wklock_history[bkt], wklock_node)
11923 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
11925 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11926 hash_del(&wklock_info->wklock_node);
11928 hlist_del_init(&wklock_info->wklock_node);
11929 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
11930 kfree(wklock_info);
11932 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11935 void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
11937 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
11938 unsigned long flags;
11940 DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
11941 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11942 dhd_wk_lock_rec_dump();
11943 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11944 DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
11947 #define STORE_WKLOCK_RECORD(wklock_type)
11948 #endif /* ! DHD_TRACE_WAKE_LOCK */
11950 int dhd_os_wake_lock(dhd_pub_t *pub)
11952 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11953 unsigned long flags;
11957 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11958 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
11959 #ifdef CONFIG_HAS_WAKELOCK
11960 wake_lock(&dhd->wl_wifi);
11961 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
11962 dhd_bus_dev_pm_stay_awake(pub);
11965 #ifdef DHD_TRACE_WAKE_LOCK
11966 if (trace_wklock_onoff) {
11967 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
11969 #endif /* DHD_TRACE_WAKE_LOCK */
11970 dhd->wakelock_counter++;
11971 ret = dhd->wakelock_counter;
11972 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11978 int dhd_event_wake_lock(dhd_pub_t *pub)
11980 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11981 unsigned long flags;
11985 spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
11986 if (dhd->wakelock_event_counter == 0) {
11987 #ifdef CONFIG_HAS_WAKELOCK
11988 wake_lock(&dhd->wl_evtwake);
11989 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
11990 dhd_bus_dev_pm_stay_awake(pub);
11993 dhd->wakelock_event_counter++;
11994 ret = dhd->wakelock_event_counter;
11995 spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
12001 int net_os_wake_lock(struct net_device *dev)
12003 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12007 ret = dhd_os_wake_lock(&dhd->pub);
12011 int dhd_os_wake_unlock(dhd_pub_t *pub)
12013 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12014 unsigned long flags;
12017 dhd_os_wake_lock_timeout(pub);
12019 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12021 if (dhd->wakelock_counter > 0) {
12022 dhd->wakelock_counter--;
12023 #ifdef DHD_TRACE_WAKE_LOCK
12024 if (trace_wklock_onoff) {
12025 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
12027 #endif /* DHD_TRACE_WAKE_LOCK */
12028 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
12029 #ifdef CONFIG_HAS_WAKELOCK
12030 wake_unlock(&dhd->wl_wifi);
12031 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12032 dhd_bus_dev_pm_relax(pub);
12035 ret = dhd->wakelock_counter;
12037 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12042 int dhd_event_wake_unlock(dhd_pub_t *pub)
12044 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12045 unsigned long flags;
12049 spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
12051 if (dhd->wakelock_event_counter > 0) {
12052 dhd->wakelock_event_counter--;
12053 if (dhd->wakelock_event_counter == 0) {
12054 #ifdef CONFIG_HAS_WAKELOCK
12055 wake_unlock(&dhd->wl_evtwake);
12056 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12057 dhd_bus_dev_pm_relax(pub);
12060 ret = dhd->wakelock_event_counter;
12062 spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
12067 int dhd_os_check_wakelock(dhd_pub_t *pub)
12069 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
12070 KERNEL_VERSION(2, 6, 36)))
12075 dhd = (dhd_info_t *)(pub->info);
12076 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
12078 #ifdef CONFIG_HAS_WAKELOCK
12079 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
12080 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
12081 (wake_lock_active(&dhd->wl_wdwake))))
12083 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12084 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
12091 dhd_os_check_wakelock_all(dhd_pub_t *pub)
12093 #ifdef CONFIG_HAS_WAKELOCK
12094 int l1, l2, l3, l4, l7;
12095 int l5 = 0, l6 = 0;
12096 int c, lock_active;
12097 #endif /* CONFIG_HAS_WAKELOCK */
12098 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
12099 KERNEL_VERSION(2, 6, 36)))
12105 dhd = (dhd_info_t *)(pub->info);
12109 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
12111 #ifdef CONFIG_HAS_WAKELOCK
12112 c = dhd->wakelock_counter;
12113 l1 = wake_lock_active(&dhd->wl_wifi);
12114 l2 = wake_lock_active(&dhd->wl_wdwake);
12115 l3 = wake_lock_active(&dhd->wl_rxwake);
12116 l4 = wake_lock_active(&dhd->wl_ctrlwake);
12117 #ifdef BCMPCIE_OOB_HOST_WAKE
12118 l5 = wake_lock_active(&dhd->wl_intrwake);
12119 #endif /* BCMPCIE_OOB_HOST_WAKE */
12120 #ifdef DHD_USE_SCAN_WAKELOCK
12121 l6 = wake_lock_active(&dhd->wl_scanwake);
12122 #endif /* DHD_USE_SCAN_WAKELOCK */
12123 l7 = wake_lock_active(&dhd->wl_evtwake);
12124 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);
12126 /* Indicate to the Host to avoid going to suspend if internal locks are up */
12127 if (dhd && lock_active) {
12128 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
12129 "ctl-%d intr-%d scan-%d evt-%d\n",
12130 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
12133 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12134 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
12137 #endif /* CONFIG_HAS_WAKELOCK */
12141 int net_os_wake_unlock(struct net_device *dev)
12143 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12147 ret = dhd_os_wake_unlock(&dhd->pub);
12151 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
12153 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12154 unsigned long flags;
12158 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12159 #ifdef CONFIG_HAS_WAKELOCK
12160 /* if wakelock_wd_counter was never used : lock it at once */
12161 if (!dhd->wakelock_wd_counter)
12162 wake_lock(&dhd->wl_wdwake);
12164 dhd->wakelock_wd_counter++;
12165 ret = dhd->wakelock_wd_counter;
12166 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12171 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
12173 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12174 unsigned long flags;
12178 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12179 if (dhd->wakelock_wd_counter) {
12180 dhd->wakelock_wd_counter = 0;
12181 #ifdef CONFIG_HAS_WAKELOCK
12182 wake_unlock(&dhd->wl_wdwake);
12185 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12190 #ifdef BCMPCIE_OOB_HOST_WAKE
12192 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
12194 #ifdef CONFIG_HAS_WAKELOCK
12195 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12198 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
12200 #endif /* CONFIG_HAS_WAKELOCK */
12204 dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
12206 #ifdef CONFIG_HAS_WAKELOCK
12207 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12210 /* if wl_intrwake is active, unlock it */
12211 if (wake_lock_active(&dhd->wl_intrwake)) {
12212 wake_unlock(&dhd->wl_intrwake);
12215 #endif /* CONFIG_HAS_WAKELOCK */
12217 #endif /* BCMPCIE_OOB_HOST_WAKE */
12219 #ifdef DHD_USE_SCAN_WAKELOCK
12221 dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
12223 #ifdef CONFIG_HAS_WAKELOCK
12224 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12227 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
12229 #endif /* CONFIG_HAS_WAKELOCK */
12233 dhd_os_scan_wake_unlock(dhd_pub_t *pub)
12235 #ifdef CONFIG_HAS_WAKELOCK
12236 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12239 /* if wl_scanwake is active, unlock it */
12240 if (wake_lock_active(&dhd->wl_scanwake)) {
12241 wake_unlock(&dhd->wl_scanwake);
12244 #endif /* CONFIG_HAS_WAKELOCK */
12246 #endif /* DHD_USE_SCAN_WAKELOCK */
12248 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
12249 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
12251 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
12253 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12254 unsigned long flags;
12258 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12260 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12261 if (dhd->waive_wakelock == FALSE) {
12262 #ifdef DHD_TRACE_WAKE_LOCK
12263 if (trace_wklock_onoff) {
12264 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
12266 #endif /* DHD_TRACE_WAKE_LOCK */
12267 /* record current lock status */
12268 dhd->wakelock_before_waive = dhd->wakelock_counter;
12269 dhd->waive_wakelock = TRUE;
12271 ret = dhd->wakelock_wd_counter;
12272 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12277 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
12279 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12280 unsigned long flags;
12286 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12288 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12289 if (!dhd->waive_wakelock)
12292 dhd->waive_wakelock = FALSE;
12293 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
12294 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
12295 * the lock in between, do the same by calling wake_unlock or pm_relax
12297 #ifdef DHD_TRACE_WAKE_LOCK
12298 if (trace_wklock_onoff) {
12299 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
12301 #endif /* DHD_TRACE_WAKE_LOCK */
12303 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
12304 #ifdef CONFIG_HAS_WAKELOCK
12305 wake_lock(&dhd->wl_wifi);
12306 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12307 dhd_bus_dev_pm_stay_awake(&dhd->pub);
12309 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
12310 #ifdef CONFIG_HAS_WAKELOCK
12311 wake_unlock(&dhd->wl_wifi);
12312 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12313 dhd_bus_dev_pm_relax(&dhd->pub);
12316 dhd->wakelock_before_waive = 0;
12318 ret = dhd->wakelock_wd_counter;
12319 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12323 void dhd_os_wake_lock_init(struct dhd_info *dhd)
12325 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
12326 dhd->wakelock_event_counter = 0;
12327 dhd->wakelock_counter = 0;
12328 dhd->wakelock_rx_timeout_enable = 0;
12329 dhd->wakelock_ctrl_timeout_enable = 0;
12330 #ifdef CONFIG_HAS_WAKELOCK
12331 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
12332 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
12333 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
12334 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
12335 #ifdef BCMPCIE_OOB_HOST_WAKE
12336 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
12337 #endif /* BCMPCIE_OOB_HOST_WAKE */
12338 #ifdef DHD_USE_SCAN_WAKELOCK
12339 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
12340 #endif /* DHD_USE_SCAN_WAKELOCK */
12341 #endif /* CONFIG_HAS_WAKELOCK */
12342 #ifdef DHD_TRACE_WAKE_LOCK
12343 dhd_wk_lock_trace_init(dhd);
12344 #endif /* DHD_TRACE_WAKE_LOCK */
12347 void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
12349 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
12350 #ifdef CONFIG_HAS_WAKELOCK
12351 dhd->wakelock_event_counter = 0;
12352 dhd->wakelock_counter = 0;
12353 dhd->wakelock_rx_timeout_enable = 0;
12354 dhd->wakelock_ctrl_timeout_enable = 0;
12355 wake_lock_destroy(&dhd->wl_wifi);
12356 wake_lock_destroy(&dhd->wl_rxwake);
12357 wake_lock_destroy(&dhd->wl_ctrlwake);
12358 wake_lock_destroy(&dhd->wl_evtwake);
12359 #ifdef BCMPCIE_OOB_HOST_WAKE
12360 wake_lock_destroy(&dhd->wl_intrwake);
12361 #endif /* BCMPCIE_OOB_HOST_WAKE */
12362 #ifdef DHD_USE_SCAN_WAKELOCK
12363 wake_lock_destroy(&dhd->wl_scanwake);
12364 #endif /* DHD_USE_SCAN_WAKELOCK */
12365 #ifdef DHD_TRACE_WAKE_LOCK
12366 dhd_wk_lock_trace_deinit(dhd);
12367 #endif /* DHD_TRACE_WAKE_LOCK */
12368 #endif /* CONFIG_HAS_WAKELOCK */
12371 bool dhd_os_check_if_up(dhd_pub_t *pub)
12378 /* function to collect firmware, chip id and chip version info */
12379 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12383 i = snprintf(info_string, sizeof(info_string),
12384 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
12385 printf("%s\n", info_string);
12390 i = snprintf(&info_string[i], sizeof(info_string) - i,
12391 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12392 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12395 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
12399 dhd_info_t *dhd = NULL;
12401 if (!net || !DEV_PRIV(net)) {
12402 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
12406 dhd = DHD_DEV_INFO(net);
12410 ifidx = dhd_net2idx(dhd, net);
12411 if (ifidx == DHD_BAD_IF) {
12412 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
12416 DHD_OS_WAKE_LOCK(&dhd->pub);
12417 DHD_PERIM_LOCK(&dhd->pub);
12419 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
12420 dhd_check_hang(net, &dhd->pub, ret);
12422 DHD_PERIM_UNLOCK(&dhd->pub);
12423 DHD_OS_WAKE_UNLOCK(&dhd->pub);
12428 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
12430 struct net_device *net;
12432 net = dhd_idx2net(dhdp, ifidx);
12434 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
12438 return dhd_check_hang(net, dhdp, ret);
12441 /* Return instance */
12442 int dhd_get_instance(dhd_pub_t *dhdp)
12444 return dhdp->info->unit;
12448 #ifdef PROP_TXSTATUS
12450 void dhd_wlfc_plat_init(void *dhd)
12452 #ifdef USE_DYNAMIC_F2_BLKSIZE
12453 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
12454 #endif /* USE_DYNAMIC_F2_BLKSIZE */
12458 void dhd_wlfc_plat_deinit(void *dhd)
12460 #ifdef USE_DYNAMIC_F2_BLKSIZE
12461 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
12462 #endif /* USE_DYNAMIC_F2_BLKSIZE */
12466 bool dhd_wlfc_skip_fc(void)
12468 #ifdef SKIP_WLFC_ON_CONCURRENT
12471 /* enable flow control in vsdb mode */
12472 return !(wl_cfg80211_is_concurrent_mode());
12474 return TRUE; /* skip flow control */
12475 #endif /* WL_CFG80211 */
12479 #endif /* SKIP_WLFC_ON_CONCURRENT */
12481 #endif /* PROP_TXSTATUS */
12484 #include <linux/debugfs.h>
12486 typedef struct dhd_dbgfs {
12487 struct dentry *debugfs_dir;
12488 struct dentry *debugfs_mem;
12493 dhd_dbgfs_t g_dbgfs;
12495 extern uint32 dhd_readregl(void *bp, uint32 addr);
12496 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12499 dhd_dbg_state_open(struct inode *inode, struct file *file)
12501 file->private_data = inode->i_private;
12506 dhd_dbg_state_read(struct file *file, char __user *ubuf,
12507 size_t count, loff_t *ppos)
12511 loff_t pos = *ppos;
12516 if (pos >= g_dbgfs.size || !count)
12518 if (count > g_dbgfs.size - pos)
12519 count = g_dbgfs.size - pos;
12521 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
12522 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
12524 ret = copy_to_user(ubuf, &tmp, 4);
12529 *ppos = pos + count;
12537 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
12539 loff_t pos = *ppos;
12545 if (pos >= g_dbgfs.size || !count)
12547 if (count > g_dbgfs.size - pos)
12548 count = g_dbgfs.size - pos;
12550 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
12554 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
12555 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
12562 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
12571 pos = file->f_pos + off;
12574 pos = g_dbgfs.size - off;
12576 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
12579 static const struct file_operations dhd_dbg_state_ops = {
12580 .read = dhd_dbg_state_read,
12581 .write = dhd_debugfs_write,
12582 .open = dhd_dbg_state_open,
12583 .llseek = dhd_debugfs_lseek
12586 static void dhd_dbg_create(void)
12588 if (g_dbgfs.debugfs_dir) {
12589 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12590 NULL, &dhd_dbg_state_ops);
12594 void dhd_dbg_init(dhd_pub_t *dhdp)
12596 g_dbgfs.dhdp = dhdp;
12597 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12599 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12600 if (IS_ERR(g_dbgfs.debugfs_dir)) {
12601 g_dbgfs.debugfs_dir = NULL;
12610 void dhd_dbg_remove(void)
12612 debugfs_remove(g_dbgfs.debugfs_mem);
12613 debugfs_remove(g_dbgfs.debugfs_dir);
12615 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
12617 #endif /* BCMDBGFS */
12619 #ifdef WLMEDIA_HTSF
12622 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
12624 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
12625 struct sk_buff *skb;
12627 uint16 dport = 0, oldmagic = 0xACAC;
12631 /* timestamp packet */
12633 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
12635 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
12636 /* memcpy(&proto, p1+26, 4); */
12637 memcpy(&dport, p1+40, 2);
12638 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
12639 dport = ntoh16(dport);
12642 /* timestamp only if icmp or udb iperf with port 5555 */
12643 /* if (proto == 17 && dport == tsport) { */
12644 if (dport >= tsport && dport <= tsport + 20) {
12646 skb = (struct sk_buff *) pktbuf;
12648 htsf = dhd_get_htsf(dhd, 0);
12649 memset(skb->data + 44, 0, 2); /* clear checksum */
12650 memcpy(skb->data+82, &oldmagic, 2);
12651 memcpy(skb->data+84, &htsf, 4);
12653 memset(&ts, 0, sizeof(htsfts_t));
12654 ts.magic = HTSFMAGIC;
12655 ts.prio = PKTPRIO(pktbuf);
12656 ts.seqnum = htsf_seqnum++;
12657 ts.c10 = get_cycles();
12659 ts.endmagic = HTSFENDMAGIC;
12661 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
12665 static void dhd_dump_htsfhisto(histo_t *his, char *s)
12667 int pktcnt = 0, curval = 0, i;
12668 for (i = 0; i < (NUMBIN-2); i++) {
12670 printf("%d ", his->bin[i]);
12671 pktcnt += his->bin[i];
12673 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12674 his->bin[NUMBIN-1], s);
12678 void sorttobin(int value, histo_t *histo)
12683 histo->bin[NUMBIN-1]++;
12686 if (value > histo->bin[NUMBIN-2]) /* store the max value */
12687 histo->bin[NUMBIN-2] = value;
12689 for (i = 0; i < (NUMBIN-2); i++) {
12690 binval += 500; /* 500m s bins */
12691 if (value <= binval) {
12696 histo->bin[NUMBIN-3]++;
12700 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12702 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12703 struct sk_buff *skb;
12706 int d1, d2, d3, end2end;
12710 skb = PKTTONATIVE(dhdp->osh, pktbuf);
12711 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12713 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
12714 memcpy(&old_magic, p1+78, 2);
12715 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12719 if (htsf_ts->magic == HTSFMAGIC) {
12720 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12721 htsf_ts->cE0 = get_cycles();
12724 if (old_magic == 0xACAC) {
12727 htsf = dhd_get_htsf(dhd, 0);
12728 memcpy(skb->data+92, &htsf, sizeof(uint32));
12730 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12732 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12733 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12734 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12735 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12737 sorttobin(d1, &vi_d1);
12738 sorttobin(d2, &vi_d2);
12739 sorttobin(d3, &vi_d3);
12740 sorttobin(end2end, &vi_d4);
12742 if (end2end > 0 && end2end > maxdelay) {
12743 maxdelay = end2end;
12744 maxdelaypktno = tspktcnt;
12745 memcpy(&maxdelayts, &ts[tsidx], 16);
12747 if (++tsidx >= TSMAX)
12752 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
12754 uint32 htsf = 0, cur_cycle, delta, delta_us;
12755 uint32 factor, baseval, baseval2;
12761 if (cur_cycle > dhd->htsf.last_cycle) {
12762 delta = cur_cycle - dhd->htsf.last_cycle;
12764 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
12767 delta = delta >> 4;
12769 if (dhd->htsf.coef) {
12770 /* times ten to get the first digit */
12771 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
12772 baseval = (delta*10)/factor;
12773 baseval2 = (delta*10)/(factor+1);
12774 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
12775 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
12777 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
12783 static void dhd_dump_latency(void)
12786 int d1, d2, d3, d4, d5;
12788 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
12789 for (i = 0; i < TSMAX; i++) {
12790 d1 = ts[i].t2 - ts[i].t1;
12791 d2 = ts[i].t3 - ts[i].t2;
12792 d3 = ts[i].t4 - ts[i].t3;
12793 d4 = ts[i].t4 - ts[i].t1;
12794 d5 = ts[max].t4-ts[max].t1;
12795 if (d4 > d5 && d4 > 0) {
12798 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
12799 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
12800 d1, d2, d3, d4, i);
12803 printf("current idx = %d \n", tsidx);
12805 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
12806 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
12807 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
12808 maxdelayts.t2 - maxdelayts.t1,
12809 maxdelayts.t3 - maxdelayts.t2,
12810 maxdelayts.t4 - maxdelayts.t3,
12811 maxdelayts.t4 - maxdelayts.t1);
12816 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
12828 memset(&ioc, 0, sizeof(ioc));
12829 memset(&tsf_buf, 0, sizeof(tsf_buf));
12831 ioc.cmd = WLC_GET_VAR;
12833 ioc.len = (uint)sizeof(buf);
12836 strncpy(buf, "tsf", sizeof(buf) - 1);
12837 buf[sizeof(buf) - 1] = '\0';
12838 s1 = dhd_get_htsf(dhd, 0);
12839 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
12841 DHD_ERROR(("%s: tsf is not supported by device\n",
12842 dhd_ifname(&dhd->pub, ifidx)));
12843 return -EOPNOTSUPP;
12847 s2 = dhd_get_htsf(dhd, 0);
12849 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
12850 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
12851 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
12852 dhd->htsf.coefdec2, s2-tsf_buf.low);
12853 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
12857 void htsf_update(dhd_info_t *dhd, void *data)
12859 static ulong cur_cycle = 0, prev_cycle = 0;
12860 uint32 htsf, tsf_delta = 0;
12861 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
12865 /* cycles_t in inlcude/mips/timex.h */
12869 prev_cycle = cur_cycle;
12872 if (cur_cycle > prev_cycle)
12873 cyc_delta = cur_cycle - prev_cycle;
12877 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
12881 printf(" tsf update ata point er is null \n");
12883 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
12884 memcpy(&cur_tsf, data, sizeof(tsf_t));
12886 if (cur_tsf.low == 0) {
12887 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
12891 if (cur_tsf.low > prev_tsf.low)
12892 tsf_delta = (cur_tsf.low - prev_tsf.low);
12894 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
12895 cur_tsf.low, prev_tsf.low));
12896 if (cur_tsf.high > prev_tsf.high) {
12897 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
12898 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
12900 return; /* do not update */
12905 hfactor = cyc_delta / tsf_delta;
12906 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
12907 dec1 = tmp/tsf_delta;
12908 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
12909 tmp = (tmp - (dec1*tsf_delta))*10;
12910 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
12928 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
12929 dhd->htsf.coef = hfactor;
12930 dhd->htsf.last_cycle = cur_cycle;
12931 dhd->htsf.last_tsf = cur_tsf.low;
12932 dhd->htsf.coefdec1 = dec1;
12933 dhd->htsf.coefdec2 = dec2;
12935 htsf = prev_tsf.low;
12939 #endif /* WLMEDIA_HTSF */
12941 #ifdef CUSTOM_SET_CPUCORE
12942 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
12944 int e_dpc = 0, e_rxf = 0, retry_set = 0;
12946 if (!(dhd->chan_isvht80)) {
12947 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
12954 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12955 cpumask_of(DPC_CPUCORE));
12957 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12958 cpumask_of(PRIMARY_CPUCORE));
12960 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
12961 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
12966 } while (e_dpc < 0);
12971 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
12972 cpumask_of(RXF_CPUCORE));
12974 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
12975 cpumask_of(PRIMARY_CPUCORE));
12977 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
12978 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
12983 } while (e_rxf < 0);
12985 #ifdef DHD_OF_SUPPORT
12986 interrupt_set_cpucore(set);
12987 #endif /* DHD_OF_SUPPORT */
12988 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
12992 #endif /* CUSTOM_SET_CPUCORE */
12994 /* Get interface specific ap_isolate configuration */
12995 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
12997 dhd_info_t *dhd = dhdp->info;
13000 ASSERT(idx < DHD_MAX_IFS);
13002 ifp = dhd->iflist[idx];
13004 return ifp->ap_isolate;
13007 /* Set interface specific ap_isolate configuration */
13008 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13010 dhd_info_t *dhd = dhdp->info;
13013 ASSERT(idx < DHD_MAX_IFS);
13015 ifp = dhd->iflist[idx];
13017 ifp->ap_isolate = val;
13022 #ifdef DHD_FW_COREDUMP
13025 #ifdef CUSTOMER_HW4_DEBUG
13026 #ifdef PLATFORM_SLP
13027 #define MEMDUMPINFO "/opt/etc/.memdump.info"
13029 #define MEMDUMPINFO "/data/.memdump.info"
13030 #endif /* PLATFORM_SLP */
13031 #elif defined(CUSTOMER_HW2)
13032 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
13034 #define MEMDUMPINFO "/installmedia/.memdump.info"
13035 #endif /* CUSTOMER_HW4_DEBUG */
13037 void dhd_get_memdump_info(dhd_pub_t *dhd)
13039 struct file *fp = NULL;
13040 uint32 mem_val = DUMP_MEMFILE_MAX;
13042 char *filepath = MEMDUMPINFO;
13044 /* Read memdump info from the file */
13045 fp = filp_open(filepath, O_RDONLY, 0);
13047 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13050 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13052 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13053 filp_close(fp, NULL);
13057 mem_val = bcm_atoi((char *)&mem_val);
13059 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
13060 filp_close(fp, NULL);
13064 #ifdef CUSTOMER_HW4_DEBUG
13065 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
13067 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
13068 #endif /* CUSTOMER_HW4_DEBUG */
13072 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
13074 dhd_dump_t *dump = NULL;
13075 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
13076 if (dump == NULL) {
13077 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
13081 dump->bufsize = size;
13083 #if defined(CONFIG_ARM64)
13084 DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
13085 (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
13086 #elif defined(__ARM_ARCH_7A__)
13087 DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
13088 (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
13089 #endif /* __ARM_ARCH_7A__ */
13090 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
13094 #ifdef DHD_LOG_DUMP
13095 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
13096 dhd_schedule_log_dump(dhdp);
13098 #endif /* DHD_LOG_DUMP */
13099 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
13100 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
13103 dhd_mem_dump(void *handle, void *event_info, u8 event)
13105 dhd_info_t *dhd = handle;
13106 dhd_dump_t *dump = event_info;
13109 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13114 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
13118 if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
13119 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
13122 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
13123 #ifdef DHD_LOG_DUMP
13124 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
13129 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
13131 #endif /* DHD_FW_COREDUMP */
13133 #ifdef DHD_LOG_DUMP
13135 dhd_log_dump(void *handle, void *event_info, u8 event)
13137 dhd_info_t *dhd = handle;
13140 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13144 if (do_dhd_log_dump(&dhd->pub)) {
13145 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13150 void dhd_schedule_log_dump(dhd_pub_t *dhdp)
13152 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
13153 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
13154 dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
13158 do_dhd_log_dump(dhd_pub_t *dhdp)
13161 struct file *fp = NULL;
13162 mm_segment_t old_fs;
13164 char dump_path[128];
13165 char common_info[1024];
13166 struct timeval curtime;
13168 unsigned long flags = 0;
13174 /* Building the additional information like DHD, F/W version */
13175 memset(common_info, 0, sizeof(common_info));
13176 snprintf(common_info, sizeof(common_info),
13177 "---------- Common information ----------\n"
13178 "DHD version: %s\n"
13179 "F/W version: %s\n"
13180 "----------------------------------------\n",
13181 dhd_version, fw_version);
13183 /* change to KERNEL_DS address limit */
13187 /* Init file name */
13188 memset(dump_path, 0, sizeof(dump_path));
13189 do_gettimeofday(&curtime);
13190 snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
13191 DHD_COMMON_DUMP_PATH "debug_dump",
13192 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
13193 file_mode = O_CREAT | O_WRONLY | O_SYNC;
13195 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
13196 fp = filp_open(dump_path, file_mode, 0644);
13199 DHD_ERROR(("open file error, err = %d\n", ret));
13204 fp->f_op->write(fp, common_info, strlen(common_info), &pos);
13205 if (dhdp->dld_buf.wraparound) {
13206 fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
13208 fp->f_op->write(fp, dhdp->dld_buf.buffer,
13209 (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
13212 /* re-init dhd_log_dump_buf structure */
13213 spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
13214 dhdp->dld_buf.wraparound = 0;
13215 dhdp->dld_buf.present = dhdp->dld_buf.front;
13216 dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13217 bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13218 spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
13221 filp_close(fp, NULL);
13227 #endif /* DHD_LOG_DUMP */
13229 #ifdef BCMASSERT_LOG
13230 #ifdef CUSTOMER_HW4_DEBUG
13231 #ifdef PLATFORM_SLP
13232 #define ASSERTINFO "/opt/etc/.assert.info"
13234 #define ASSERTINFO "/data/.assert.info"
13235 #endif /* PLATFORM_SLP */
13236 #elif defined(CUSTOMER_HW2)
13237 #define ASSERTINFO "/data/misc/wifi/.assert.info"
13239 #define ASSERTINFO "/installmedia/.assert.info"
13240 #endif /* CUSTOMER_HW4_DEBUG */
13241 void dhd_get_assert_info(dhd_pub_t *dhd)
13243 struct file *fp = NULL;
13244 char *filepath = ASSERTINFO;
13247 * Read assert info from the file
13248 * 0: Trigger Kernel crash by panic()
13249 * 1: Print out the logs and don't trigger Kernel panic. (default)
13250 * 2: Trigger Kernel crash by BUG()
13251 * File doesn't exist: Keep default value (1).
13253 fp = filp_open(filepath, O_RDONLY, 0);
13255 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13258 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13260 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13262 mem_val = bcm_atoi((char *)&mem_val);
13263 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13264 g_assert_type = mem_val;
13266 filp_close(fp, NULL);
13269 #endif /* BCMASSERT_LOG */
13273 /* Returns interface specific WMF configuration */
13274 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13276 dhd_info_t *dhd = dhdp->info;
13279 ASSERT(idx < DHD_MAX_IFS);
13281 ifp = dhd->iflist[idx];
13284 #endif /* DHD_WMF */
13287 #if defined(DHD_L2_FILTER)
13288 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13290 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13294 #ifdef DHD_L2_FILTER
13296 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13298 dhd_info_t *dhd = dhdp->info;
13301 ASSERT(bssidx < DHD_MAX_IFS);
13303 ifp = dhd->iflist[bssidx];
13304 return ifp->phnd_arp_table;
13307 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13309 dhd_info_t *dhd = dhdp->info;
13312 ASSERT(idx < DHD_MAX_IFS);
13314 ifp = dhd->iflist[idx];
13317 return ifp->parp_enable;
13322 /* Set interface specific proxy arp configuration */
13323 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13325 dhd_info_t *dhd = dhdp->info;
13327 ASSERT(idx < DHD_MAX_IFS);
13328 ifp = dhd->iflist[idx];
13333 /* At present all 3 variables are being
13336 ifp->parp_enable = val;
13337 ifp->parp_discard = val;
13338 ifp->parp_allnode = !val;
13340 /* Flush ARP entries when disabled */
13341 if (val == FALSE) {
13342 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13343 FALSE, dhdp->tickcnt);
13348 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13350 dhd_info_t *dhd = dhdp->info;
13353 ASSERT(idx < DHD_MAX_IFS);
13355 ifp = dhd->iflist[idx];
13358 return ifp->parp_discard;
13362 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13364 dhd_info_t *dhd = dhdp->info;
13367 ASSERT(idx < DHD_MAX_IFS);
13369 ifp = dhd->iflist[idx];
13373 return ifp->parp_allnode;
13376 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13378 dhd_info_t *dhd = dhdp->info;
13381 ASSERT(idx < DHD_MAX_IFS);
13383 ifp = dhd->iflist[idx];
13387 return ifp->dhcp_unicast;
13390 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13392 dhd_info_t *dhd = dhdp->info;
13394 ASSERT(idx < DHD_MAX_IFS);
13395 ifp = dhd->iflist[idx];
13399 ifp->dhcp_unicast = val;
13403 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13405 dhd_info_t *dhd = dhdp->info;
13408 ASSERT(idx < DHD_MAX_IFS);
13410 ifp = dhd->iflist[idx];
13414 return ifp->block_ping;
13417 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13419 dhd_info_t *dhd = dhdp->info;
13421 ASSERT(idx < DHD_MAX_IFS);
13422 ifp = dhd->iflist[idx];
13426 ifp->block_ping = val;
13431 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13433 dhd_info_t *dhd = dhdp->info;
13436 ASSERT(idx < DHD_MAX_IFS);
13438 ifp = dhd->iflist[idx];
13442 return ifp->grat_arp;
13445 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13447 dhd_info_t *dhd = dhdp->info;
13449 ASSERT(idx < DHD_MAX_IFS);
13450 ifp = dhd->iflist[idx];
13454 ifp->grat_arp = val;
13458 #endif /* DHD_L2_FILTER */
13461 #if defined(SET_RPS_CPUS)
13462 int dhd_rps_cpus_enable(struct net_device *net, int enable)
13464 dhd_info_t *dhd = DHD_DEV_INFO(net);
13467 char * RPS_CPU_SETBUF;
13469 ifidx = dhd_net2idx(dhd, net);
13470 if (ifidx == DHD_BAD_IF) {
13471 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
13475 if (ifidx == PRIMARY_INF) {
13476 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
13477 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
13478 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
13480 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
13481 RPS_CPU_SETBUF = RPS_CPUS_MASK;
13483 } else if (ifidx == VIRTUAL_INF) {
13484 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
13485 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
13487 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
13491 ifp = dhd->iflist[ifidx];
13494 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
13495 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
13497 custom_rps_map_clear(ifp->net->_rx);
13500 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
13506 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
13508 struct rps_map *old_map, *map;
13509 cpumask_var_t mask;
13511 static DEFINE_SPINLOCK(rps_map_lock);
13513 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13515 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
13516 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
13520 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
13522 free_cpumask_var(mask);
13523 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
13527 map = kzalloc(max_t(unsigned int,
13528 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
13531 free_cpumask_var(mask);
13532 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
13537 for_each_cpu(cpu, mask) {
13538 map->cpus[i++] = cpu;
13546 free_cpumask_var(mask);
13547 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
13551 spin_lock(&rps_map_lock);
13552 old_map = rcu_dereference_protected(queue->rps_map,
13553 lockdep_is_held(&rps_map_lock));
13554 rcu_assign_pointer(queue->rps_map, map);
13555 spin_unlock(&rps_map_lock);
13558 static_key_slow_inc(&rps_needed);
13561 kfree_rcu(old_map, rcu);
13562 static_key_slow_dec(&rps_needed);
13564 free_cpumask_var(mask);
13566 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
13570 void custom_rps_map_clear(struct netdev_rx_queue *queue)
13572 struct rps_map *map;
13574 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13576 map = rcu_dereference_protected(queue->rps_map, 1);
13578 RCU_INIT_POINTER(queue->rps_map, NULL);
13579 kfree_rcu(map, rcu);
13580 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
13587 #ifdef DHD_DEBUG_PAGEALLOC
13590 dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
13592 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
13594 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
13595 __FUNCTION__, addr_corrupt, (uint32)len));
13597 DHD_OS_WAKE_LOCK(dhdp);
13598 prhex("Page Corruption:", addr_corrupt, len);
13599 dhd_dump_to_kernelog(dhdp);
13600 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
13601 /* Load the dongle side dump to host memory and then BUG_ON() */
13602 dhdp->memdump_enabled = DUMP_MEMONLY;
13603 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
13604 dhd_bus_mem_dump(dhdp);
13605 #endif /* BCMPCIE && DHD_FW_COREDUMP */
13606 DHD_OS_WAKE_UNLOCK(dhdp);
13608 EXPORT_SYMBOL(dhd_page_corrupt_cb);
13609 #endif /* DHD_DEBUG_PAGEALLOC */
13611 #ifdef DHD_PKTID_AUDIT_ENABLED
13613 dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
13615 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
13616 DHD_OS_WAKE_LOCK(dhdp);
13617 dhd_dump_to_kernelog(dhdp);
13618 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
13619 /* Load the dongle side dump to host memory and then BUG_ON() */
13620 dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
13621 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
13622 dhd_bus_mem_dump(dhdp);
13623 #endif /* BCMPCIE && DHD_FW_COREDUMP */
13624 DHD_OS_WAKE_UNLOCK(dhdp);
13626 #endif /* DHD_PKTID_AUDIT_ENABLED */
13628 /* ----------------------------------------------------------------------------
13629 * Infrastructure code for sysfs interface support for DHD
13631 * What is sysfs interface?
13632 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13634 * Why sysfs interface?
13635 * This is the Linux standard way of changing/configuring Run Time parameters
13636 * for a driver. We can use this interface to control "linux" specific driver
13639 * -----------------------------------------------------------------------------
13642 #include <linux/sysfs.h>
13643 #include <linux/kobject.h>
13645 #if defined(DHD_TRACE_WAKE_LOCK)
13647 /* Function to show the history buffer */
13649 show_wklock_trace(struct dhd_info *dev, char *buf)
13652 dhd_info_t *dhd = (dhd_info_t *)dev;
13657 dhd_wk_lock_stats_dump(&dhd->pub);
13661 /* Function to enable/disable wakelock trace */
13663 wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13665 unsigned long onoff;
13666 unsigned long flags;
13667 dhd_info_t *dhd = (dhd_info_t *)dev;
13669 onoff = bcm_strtoul(buf, NULL, 10);
13670 if (onoff != 0 && onoff != 1) {
13674 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13675 trace_wklock_onoff = onoff;
13676 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13677 if (trace_wklock_onoff) {
13678 printk("ENABLE WAKLOCK TRACE\n");
13680 printk("DISABLE WAKELOCK TRACE\n");
13683 return (ssize_t)(onoff+1);
13685 #endif /* DHD_TRACE_WAKE_LOCK */
13688 * Generic Attribute Structure for DHD.
13689 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13690 * to instantiate an object of type dhd_attr, populate it with
13691 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13692 * and add the object to default_attrs[] array, that gets registered
13693 * to the kobject of dhd (named bcm-dhd).
13697 struct attribute attr;
13698 ssize_t(*show)(struct dhd_info *, char *);
13699 ssize_t(*store)(struct dhd_info *, const char *, size_t count);
13702 #if defined(DHD_TRACE_WAKE_LOCK)
13703 static struct dhd_attr dhd_attr_wklock =
13704 __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
13705 #endif /* defined(DHD_TRACE_WAKE_LOCK */
13707 /* Attribute object that gets registered with "bcm-dhd" kobject tree */
13708 static struct attribute *default_attrs[] = {
13709 #if defined(DHD_TRACE_WAKE_LOCK)
13710 &dhd_attr_wklock.attr,
13715 #define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
13716 #define to_attr(a) container_of(a, struct dhd_attr, attr)
13719 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13720 * node under "bcm-dhd" the show function is called.
13722 static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13724 dhd_info_t *dhd = to_dhd(kobj);
13725 struct dhd_attr *d_attr = to_attr(attr);
13729 ret = d_attr->show(dhd, buf);
13738 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13739 * node under "bcm-dhd" the store function is called.
13741 static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13742 const char *buf, size_t count)
13744 dhd_info_t *dhd = to_dhd(kobj);
13745 struct dhd_attr *d_attr = to_attr(attr);
13749 ret = d_attr->store(dhd, buf, count);
13757 static struct sysfs_ops dhd_sysfs_ops = {
13759 .store = dhd_store,
13762 static struct kobj_type dhd_ktype = {
13763 .sysfs_ops = &dhd_sysfs_ops,
13764 .default_attrs = default_attrs,
13767 /* Create a kobject and attach to sysfs interface */
13768 static int dhd_sysfs_init(dhd_info_t *dhd)
13773 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13777 /* Initialize the kobject */
13778 ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13780 kobject_put(&dhd->dhd_kobj);
13781 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13786 * We are always responsible for sending the uevent that the kobject
13787 * was added to the system.
13789 kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13794 /* Done with the kobject and detach the sysfs interface */
13795 static void dhd_sysfs_exit(dhd_info_t *dhd)
13798 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13802 /* Releae the kobject */
13803 kobject_put(&dhd->dhd_kobj);
13806 #ifdef DHD_LOG_DUMP
13808 dhd_log_dump_init(dhd_pub_t *dhd)
13810 spin_lock_init(&dhd->dld_buf.lock);
13811 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13812 dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
13813 DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
13815 dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
13816 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13818 if (!dhd->dld_buf.buffer) {
13819 dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
13820 DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
13822 if (!dhd->dld_buf.buffer) {
13823 DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
13828 dhd->dld_buf.wraparound = 0;
13829 dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
13830 dhd->dld_buf.present = dhd->dld_buf.buffer;
13831 dhd->dld_buf.front = dhd->dld_buf.buffer;
13832 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13833 dhd->dld_enable = 1;
13837 dhd_log_dump_deinit(dhd_pub_t *dhd)
13839 dhd->dld_enable = 0;
13840 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13841 DHD_OS_PREFREE(dhd,
13842 dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13844 kfree(dhd->dld_buf.buffer);
13845 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13849 dhd_log_dump_print(const char *fmt, ...)
13852 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
13854 dhd_pub_t *dhd = NULL;
13855 unsigned long flags = 0;
13857 if (wl_get_bcm_cfg80211_ptr()) {
13858 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
13861 if (!dhd || dhd->dld_enable != 1) {
13865 va_start(args, fmt);
13867 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
13872 /* make a critical section to eliminate race conditions */
13873 spin_lock_irqsave(&dhd->dld_buf.lock, flags);
13874 if (dhd->dld_buf.remain < len) {
13875 dhd->dld_buf.wraparound = 1;
13876 dhd->dld_buf.present = dhd->dld_buf.front;
13877 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13880 strncpy(dhd->dld_buf.present, tmp_buf, len);
13881 dhd->dld_buf.remain -= len;
13882 dhd->dld_buf.present += len;
13883 spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
13885 /* double check invalid memory operation */
13886 ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
13891 dhd_log_dump_get_timestamp(void)
13893 static char buf[16];
13895 unsigned long rem_nsec;
13897 ts_nsec = local_clock();
13898 rem_nsec = do_div(ts_nsec, 1000000000);
13899 snprintf(buf, sizeof(buf), "%5lu.%06lu",
13900 (unsigned long)ts_nsec, rem_nsec / 1000);
13905 #endif /* DHD_LOG_DUMP */
13907 /* ---------------------------- End of sysfs implementation ------------------------------------- */
13909 void *dhd_get_pub(struct net_device *dev)
13911 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
13913 return (void *)&dhdinfo->pub;
13918 bool dhd_os_wd_timer_enabled(void *bus)
13920 dhd_pub_t *pub = bus;
13921 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13923 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13925 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13928 return dhd->wd_timer_valid;