2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * $Copyright Open Broadcom Corporation$
7 * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
14 #include <linux/syscalls.h>
15 #include <event_log.h>
16 #endif /* SHOW_LOGTRACE */
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/random.h>
28 #include <linux/spinlock.h>
29 #include <linux/ethtool.h>
30 #include <linux/fcntl.h>
33 #include <linux/reboot.h>
34 #include <linux/notifier.h>
35 #include <net/addrconf.h>
36 #ifdef ENABLE_ADAPTIVE_SCHED
37 #include <linux/cpufreq.h>
38 #endif /* ENABLE_ADAPTIVE_SCHED */
40 #include <asm/uaccess.h>
41 #include <asm/unaligned.h>
45 #include <bcmendian.h>
48 #include <proto/ethernet.h>
49 #include <proto/bcmevent.h>
50 #include <proto/vlan.h>
52 #include <proto/bcmicmp.h>
54 #include <proto/802.3.h>
56 #include <dngl_stats.h>
57 #include <dhd_linux_wq.h>
59 #include <dhd_linux.h>
60 #ifdef PCIE_FULL_DONGLE
61 #include <dhd_flowring.h>
64 #include <dhd_proto.h>
65 #include <dhd_config.h>
67 #ifdef CONFIG_HAS_WAKELOCK
68 #include <linux/wakelock.h>
71 #include <wl_cfg80211.h>
74 #include <wl_cfgp2p.h>
80 #include <proto/802.11_bta.h>
81 #include <proto/bt_amp_hci.h>
86 #include <linux/compat.h>
90 #include <dhd_wmf_linux.h>
93 #ifdef AMPDU_VO_ENABLE
94 #include <proto/802.1d.h>
95 #endif /* AMPDU_VO_ENABLE */
96 #ifdef DHDTCPACK_SUPPRESS
98 #endif /* DHDTCPACK_SUPPRESS */
100 #if defined(DHD_TCP_WINSIZE_ADJUST)
101 #include <linux/tcp.h>
103 #endif /* DHD_TCP_WINSIZE_ADJUST */
106 #include <linux/time.h>
109 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
110 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
111 #define TSMAX 1000 /* max no. of timing record kept */
114 static uint32 tsidx = 0;
115 static uint32 htsf_seqnum = 0;
117 struct timeval tsync;
118 static uint32 tsport = 5010;
120 typedef struct histo_ {
124 #if !ISPOWEROF2(DHD_SDALIGN)
125 #error DHD_SDALIGN is not a power of 2!
128 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
129 #endif /* WLMEDIA_HTSF */
131 #if defined(DHD_TCP_WINSIZE_ADJUST)
132 #define MIN_TCP_WIN_SIZE 18000
133 #define WIN_SIZE_SCALE_FACTOR 2
134 #define MAX_TARGET_PORTS 5
136 static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
137 static uint dhd_use_tcp_window_size_adjust = FALSE;
138 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
139 #endif /* DHD_TCP_WINSIZE_ADJUST */
143 extern bool ap_cfg_running;
144 extern bool ap_fw_loaded;
148 #ifdef ENABLE_ADAPTIVE_SCHED
149 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
150 #ifndef CUSTOM_CPUFREQ_THRESH
151 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
152 #endif /* CUSTOM_CPUFREQ_THRESH */
153 #endif /* ENABLE_ADAPTIVE_SCHED */
155 /* enable HOSTIP cache update from the host side when an eth0:N is up */
156 #define AOE_IP_ALIAS_SUPPORT 1
160 #include <bcm_rpc_tp.h>
163 #include <wlfc_proto.h>
164 #include <dhd_wlfc.h>
167 #include <wl_android.h>
169 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
171 #endif /* CUSTOMER_HW20 && WLANAUDIO */
173 /* Maximum STA per radio */
174 #define DHD_MAX_STA 32
177 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
178 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
179 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
181 #ifdef ARP_OFFLOAD_SUPPORT
182 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
183 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
184 unsigned long event, void *ptr);
185 static struct notifier_block dhd_inetaddr_notifier = {
186 .notifier_call = dhd_inetaddr_notifier_call
188 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
189 * created in kernel notifier link list (with 'next' pointing to itself)
191 static bool dhd_inetaddr_notifier_registered = FALSE;
192 #endif /* ARP_OFFLOAD_SUPPORT */
195 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
196 unsigned long event, void *ptr);
197 static struct notifier_block dhd_inet6addr_notifier = {
198 .notifier_call = dhd_inet6addr_notifier_call
200 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
201 * created in kernel notifier link list (with 'next' pointing to itself)
203 static bool dhd_inet6addr_notifier_registered = FALSE;
206 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
207 #include <linux/suspend.h>
208 volatile bool dhd_mmc_suspend = FALSE;
209 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
210 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
212 #if defined(OOB_INTR_ONLY)
213 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
215 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
216 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
218 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
219 MODULE_LICENSE("GPL v2");
220 #endif /* LinuxVer */
225 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
227 #ifndef PROP_TXSTATUS
228 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
230 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
232 #endif /* BCM_FD_AGGR */
235 extern bool dhd_wlfc_skip_fc(void);
236 extern void dhd_wlfc_plat_init(void *dhd);
237 extern void dhd_wlfc_plat_deinit(void *dhd);
238 #endif /* PROP_TXSTATUS */
240 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
246 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
248 /* Linux wireless extension support */
249 #if defined(WL_WIRELESS_EXT)
251 extern wl_iw_extra_params_t g_wl_iw_params;
252 #endif /* defined(WL_WIRELESS_EXT) */
254 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
255 #include <linux/earlysuspend.h>
256 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
258 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
260 #ifdef PKT_FILTER_SUPPORT
261 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
262 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
263 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
268 extern int dhd_read_macaddr(struct dhd_info *dhd);
270 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
273 extern int dhd_write_macaddr(struct ether_addr *mac);
275 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
279 #if defined(SOFTAP_TPUT_ENHANCE)
280 extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
281 extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
282 #endif /* SOFTAP_TPUT_ENHANCE */
286 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
287 void custom_rps_map_clear(struct netdev_rx_queue *queue);
288 #ifdef CONFIG_MACH_UNIVERSAL5433
289 #define RPS_CPUS_MASK "10"
291 #define RPS_CPUS_MASK "6"
292 #endif /* CONFIG_MACH_UNIVERSAL5433 */
293 #endif /* SET_RPS_CPUS */
295 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
296 static struct notifier_block dhd_reboot_notifier = {
297 .notifier_call = dhd_reboot_callback,
302 typedef struct dhd_if_event {
303 struct list_head list;
304 wl_event_data_if_t event;
305 char name[IFNAMSIZ+1];
306 uint8 mac[ETHER_ADDR_LEN];
309 /* Interface control information */
310 typedef struct dhd_if {
311 struct dhd_info *info; /* back pointer to dhd_info */
312 /* OS/stack specifics */
313 struct net_device *net;
314 int idx; /* iface idx in dongle */
315 uint subunit; /* subunit */
316 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
319 uint8 bssidx; /* bsscfg index for the interface */
320 bool attached; /* Delayed attachment when unset */
321 bool txflowcontrol; /* Per interface flow control indicator */
322 char name[IFNAMSIZ+1]; /* linux interface name */
323 struct net_device_stats stats;
325 dhd_wmf_t wmf; /* per bsscfg wmf setting */
327 #ifdef PCIE_FULL_DONGLE
328 struct list_head sta_list; /* sll of associated stations */
329 #if !defined(BCM_GMAC3)
330 spinlock_t sta_list_lock; /* lock for manipulating sll */
331 #endif /* ! BCM_GMAC3 */
332 #endif /* PCIE_FULL_DONGLE */
333 uint32 ap_isolate; /* ap-isolation settings */
346 uint32 coef; /* scaling factor */
347 uint32 coefdec1; /* first decimal */
348 uint32 coefdec2; /* second decimal */
358 static tstamp_t ts[TSMAX];
359 static tstamp_t maxdelayts;
360 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
362 #endif /* WLMEDIA_HTSF */
364 struct ipv6_work_info_t {
370 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
371 #define MAX_WLANAUDIO_BLACKLIST 4
373 struct wlanaudio_blacklist {
376 ulong txfail_jiffies;
377 struct ether_addr blacklist_addr;
379 #endif /* CUSTOMER_HW20 && WLANAUDIO */
381 /* When Perimeter locks are deployed, any blocking calls must be preceded
382 * with a PERIM UNLOCK and followed by a PERIM LOCK.
383 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
384 * wait_event_timeout().
387 /* Local private structure (extension of pub) */
388 typedef struct dhd_info {
389 #if defined(WL_WIRELESS_EXT)
390 wl_iw_t iw; /* wireless extensions state (must be first) */
391 #endif /* defined(WL_WIRELESS_EXT) */
393 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
395 void *adapter; /* adapter information, interrupt, fw path etc. */
396 char fw_path[PATH_MAX]; /* path to firmware image */
397 char nv_path[PATH_MAX]; /* path to nvram vars file */
398 char conf_path[PATH_MAX]; /* path to config vars file */
400 struct semaphore proto_sem;
402 spinlock_t wlfc_spinlock;
404 #endif /* PROP_TXSTATUS */
408 wait_queue_head_t ioctl_resp_wait;
409 uint32 default_wd_interval;
411 struct timer_list timer;
413 struct tasklet_struct tasklet;
418 struct semaphore sdsem;
419 tsk_ctl_t thr_dpc_ctl;
420 tsk_ctl_t thr_wdt_ctl;
422 tsk_ctl_t thr_rxf_ctl;
424 bool rxthread_enabled;
427 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
428 struct wake_lock wl_wifi; /* Wifi wakelock */
429 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
430 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
431 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
432 #ifdef BCMPCIE_OOB_HOST_WAKE
433 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
434 #endif /* BCMPCIE_OOB_HOST_WAKE */
435 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
437 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
438 /* net_device interface lock, prevent race conditions among net_dev interface
439 * calls and wifi_on or wifi_off
441 struct mutex dhd_net_if_mutex;
442 struct mutex dhd_suspend_mutex;
444 spinlock_t wakelock_spinlock;
445 uint32 wakelock_counter;
446 int wakelock_wd_counter;
447 int wakelock_rx_timeout_enable;
448 int wakelock_ctrl_timeout_enable;
450 uint32 wakelock_before_waive;
452 /* Thread to issue ioctl for multicast */
453 wait_queue_head_t ctrl_wait;
454 atomic_t pend_8021x_cnt;
455 dhd_attach_states_t dhd_state;
457 dhd_event_log_t event_data;
458 #endif /* SHOW_LOGTRACE */
460 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
461 struct early_suspend early_suspend;
462 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
464 #ifdef ARP_OFFLOAD_SUPPORT
466 #endif /* ARP_OFFLOAD_SUPPORT */
470 struct timer_list rpcth_timer;
471 bool rpcth_timer_active;
474 #ifdef DHDTCPACK_SUPPRESS
475 spinlock_t tcpack_lock;
476 #endif /* DHDTCPACK_SUPPRESS */
477 void *dhd_deferred_wq;
478 #ifdef DEBUG_CPU_FREQ
479 struct notifier_block freq_trans;
480 int __percpu *new_freq;
483 struct notifier_block pm_notifier;
484 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
485 struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
486 bool is_wlanaudio_blist;
487 #endif /* CUSTOMER_HW20 && WLANAUDIO */
490 #define DHDIF_FWDER(dhdif) FALSE
492 /* Flag to indicate if we should download firmware on driver load */
493 uint dhd_download_fw_on_driverload = TRUE;
495 /* Definitions to provide path to the firmware and nvram
496 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
498 char firmware_path[MOD_PARAM_PATHLEN];
499 char nvram_path[MOD_PARAM_PATHLEN];
500 char config_path[MOD_PARAM_PATHLEN];
502 /* backup buffer for firmware and nvram path */
503 char fw_bak_path[MOD_PARAM_PATHLEN];
504 char nv_bak_path[MOD_PARAM_PATHLEN];
506 /* information string to keep firmware, chip, chip-revision version info visible in the log */
507 char info_string[MOD_PARAM_INFOLEN];
508 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
510 int disable_proptx = 0;
511 module_param(op_mode, int, 0644);
512 extern int wl_control_wl_start(struct net_device *dev);
513 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
514 struct semaphore dhd_registration_sem;
515 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
517 /* deferred handlers */
518 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
519 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
520 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
521 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
523 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
527 extern void dhd_netdev_free(struct net_device *ndev);
528 #endif /* WL_CFG80211 */
531 module_param(dhd_msg_level, int, 0);
532 #if defined(WL_WIRELESS_EXT)
533 module_param(iw_msg_level, int, 0);
536 module_param(wl_dbg_level, int, 0);
538 module_param(android_msg_level, int, 0);
539 module_param(config_msg_level, int, 0);
541 #ifdef ARP_OFFLOAD_SUPPORT
542 /* ARP offload enable */
543 uint dhd_arp_enable = TRUE;
544 module_param(dhd_arp_enable, uint, 0);
546 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
548 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
550 module_param(dhd_arp_mode, uint, 0);
551 #endif /* ARP_OFFLOAD_SUPPORT */
553 /* Disable Prop tx */
554 module_param(disable_proptx, int, 0644);
555 /* load firmware and/or nvram values from the filesystem */
556 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
557 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
558 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
560 /* Watchdog interval */
562 /* extend watchdog expiration to 2 seconds when DPC is running */
563 #define WATCHDOG_EXTEND_INTERVAL (2000)
565 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
566 module_param(dhd_watchdog_ms, uint, 0);
568 #if defined(DHD_DEBUG)
569 /* Console poll interval */
570 uint dhd_console_ms = 0;
571 module_param(dhd_console_ms, uint, 0644);
572 #endif /* defined(DHD_DEBUG) */
575 uint dhd_slpauto = TRUE;
576 module_param(dhd_slpauto, uint, 0);
578 #ifdef PKT_FILTER_SUPPORT
579 /* Global Pkt filter enable control */
580 uint dhd_pkt_filter_enable = TRUE;
581 module_param(dhd_pkt_filter_enable, uint, 0);
584 /* Pkt filter init setup */
585 uint dhd_pkt_filter_init = 0;
586 module_param(dhd_pkt_filter_init, uint, 0);
588 /* Pkt filter mode control */
589 uint dhd_master_mode = FALSE;
590 module_param(dhd_master_mode, uint, 0);
592 int dhd_watchdog_prio = 0;
593 module_param(dhd_watchdog_prio, int, 0);
595 /* DPC thread priority */
596 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
597 module_param(dhd_dpc_prio, int, 0);
599 /* RX frame thread priority */
600 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
601 module_param(dhd_rxf_prio, int, 0);
603 int passive_channel_skip = 0;
604 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
606 #if !defined(BCMDHDUSB)
607 extern int dhd_dongle_ramsize;
608 module_param(dhd_dongle_ramsize, int, 0);
609 #endif /* BCMDHDUSB */
611 /* Keep track of number of instances */
612 static int dhd_found = 0;
613 static int instance_base = 0; /* Starting instance number */
614 module_param(instance_base, int, 0644);
616 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
617 dhd_info_t *dhd_global = NULL;
618 #endif /* CUSTOMER_HW20 && WLANAUDIO */
622 /* DHD Perimiter lock only used in router with bypass forwarding. */
623 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
624 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
625 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
626 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
627 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
629 #ifdef PCIE_FULL_DONGLE
630 #if defined(BCM_GMAC3)
631 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
632 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
633 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
634 #else /* ! BCM_GMAC3 */
635 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
636 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
637 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
638 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
639 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
640 #endif /* ! BCM_GMAC3 */
641 #endif /* PCIE_FULL_DONGLE */
643 /* Control fw roaming */
645 uint dhd_roam_disable = 0;
647 uint dhd_roam_disable = 0;
650 /* Control radio state */
651 uint dhd_radio_up = 1;
653 /* Network interface name */
654 char iface_name[IFNAMSIZ] = {'\0'};
655 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
657 /* The following are specific to the SDIO dongle */
659 /* IOCTL response timeout */
660 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
662 /* Idle timeout for backplane clock */
663 int dhd_idletime = DHD_IDLETIME_TICKS;
664 module_param(dhd_idletime, int, 0);
667 uint dhd_poll = FALSE;
668 module_param(dhd_poll, uint, 0);
671 uint dhd_intr = TRUE;
672 module_param(dhd_intr, uint, 0);
674 /* SDIO Drive Strength (in milliamps) */
675 uint dhd_sdiod_drive_strength = 6;
676 module_param(dhd_sdiod_drive_strength, uint, 0);
680 extern uint dhd_txbound;
681 extern uint dhd_rxbound;
682 module_param(dhd_txbound, uint, 0);
683 module_param(dhd_rxbound, uint, 0);
685 /* Deferred transmits */
686 extern uint dhd_deferred_tx;
687 module_param(dhd_deferred_tx, uint, 0);
690 extern void dhd_dbg_init(dhd_pub_t *dhdp);
691 extern void dhd_dbg_remove(void);
692 #endif /* BCMDBGFS */
698 /* Echo packet generator (pkts/s) */
700 module_param(dhd_pktgen, uint, 0);
702 /* Echo packet len (0 => sawtooth, max 2040) */
703 uint dhd_pktgen_len = 0;
704 module_param(dhd_pktgen_len, uint, 0);
707 #if defined(BCMSUP_4WAY_HANDSHAKE)
708 /* Use in dongle supplicant for 4-way handshake */
709 uint dhd_use_idsup = 0;
710 module_param(dhd_use_idsup, uint, 0);
711 #endif /* BCMSUP_4WAY_HANDSHAKE */
713 extern char dhd_version[];
715 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
716 static void dhd_net_if_lock_local(dhd_info_t *dhd);
717 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
718 static void dhd_suspend_lock(dhd_pub_t *dhdp);
719 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
722 void htsf_update(dhd_info_t *dhd, void *data);
723 tsf_t prev_tsf, cur_tsf;
725 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
726 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
727 static void dhd_dump_latency(void);
728 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
729 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
730 static void dhd_dump_htsfhisto(histo_t *his, char *s);
731 #endif /* WLMEDIA_HTSF */
733 /* Monitor interface */
734 int dhd_monitor_init(void *dhd_pub);
735 int dhd_monitor_uninit(void);
738 #if defined(WL_WIRELESS_EXT)
739 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
740 #endif /* defined(WL_WIRELESS_EXT) */
742 static void dhd_dpc(ulong data);
744 extern int dhd_wait_pend8021x(struct net_device *dev);
745 void dhd_os_wd_timer_extend(void *bus, bool extend);
749 #error TOE requires BDC
751 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
752 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
755 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
756 wl_event_msg_t *event_ptr, void **data_ptr);
757 #ifdef DHD_UNICAST_DHCP
758 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
759 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
760 int *len_ptr, uint8 *prot_ptr);
761 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
762 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
764 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
765 #endif /* DHD_UNICAST_DHCP */
767 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
769 #if defined(CONFIG_PM_SLEEP)
770 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
772 int ret = NOTIFY_DONE;
773 bool suspend = FALSE;
774 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
776 BCM_REFERENCE(dhdinfo);
778 case PM_HIBERNATION_PREPARE:
779 case PM_SUSPEND_PREPARE:
782 case PM_POST_HIBERNATION:
783 case PM_POST_SUSPEND:
788 #if defined(SUPPORT_P2P_GO_PS)
791 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
792 dhd_wlfc_suspend(&dhdinfo->pub);
793 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
795 dhd_wlfc_resume(&dhdinfo->pub);
797 #endif /* defined(SUPPORT_P2P_GO_PS) */
799 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
800 KERNEL_VERSION(2, 6, 39))
801 dhd_mmc_suspend = suspend;
808 static struct notifier_block dhd_pm_notifier = {
809 .notifier_call = dhd_pm_callback,
812 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
813 * created in kernel notifier link list (with 'next' pointing to itself)
815 static bool dhd_pm_notifier_registered = FALSE;
817 extern int register_pm_notifier(struct notifier_block *nb);
818 extern int unregister_pm_notifier(struct notifier_block *nb);
819 #endif /* CONFIG_PM_SLEEP */
821 /* Request scheduling of the bus rx frame */
822 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
823 static void dhd_os_rxflock(dhd_pub_t *pub);
824 static void dhd_os_rxfunlock(dhd_pub_t *pub);
826 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
827 typedef struct dhd_dev_priv {
828 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
829 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
830 int ifidx; /* interface index */
833 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
834 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
835 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
836 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
837 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
839 /** Clear the dhd net_device's private structure. */
841 dhd_dev_priv_clear(struct net_device * dev)
843 dhd_dev_priv_t * dev_priv;
844 ASSERT(dev != (struct net_device *)NULL);
845 dev_priv = DHD_DEV_PRIV(dev);
846 dev_priv->dhd = (dhd_info_t *)NULL;
847 dev_priv->ifp = (dhd_if_t *)NULL;
848 dev_priv->ifidx = DHD_BAD_IF;
851 /** Setup the dhd net_device's private structure. */
853 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
856 dhd_dev_priv_t * dev_priv;
857 ASSERT(dev != (struct net_device *)NULL);
858 dev_priv = DHD_DEV_PRIV(dev);
861 dev_priv->ifidx = ifidx;
864 #ifdef PCIE_FULL_DONGLE
866 /** Dummy objects are defined with state representing bad|down.
867 * Performance gains from reducing branch conditionals, instruction parallelism,
868 * dual issue, reducing load shadows, avail of larger pipelines.
869 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
870 * is accessed via the dhd_sta_t.
873 /* Dummy dhd_info object */
874 dhd_info_t dhd_info_null = {
875 #if defined(BCM_GMAC3)
879 .info = &dhd_info_null,
880 #ifdef DHDTCPACK_SUPPRESS
881 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
882 #endif /* DHDTCPACK_SUPPRESS */
883 .up = FALSE, .busstate = DHD_BUS_DOWN
886 #define DHD_INFO_NULL (&dhd_info_null)
887 #define DHD_PUB_NULL (&dhd_info_null.pub)
889 /* Dummy netdevice object */
890 struct net_device dhd_net_dev_null = {
891 .reg_state = NETREG_UNREGISTERED
893 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
895 /* Dummy dhd_if object */
896 dhd_if_t dhd_if_null = {
897 #if defined(BCM_GMAC3)
901 .wmf = { .wmf_enable = TRUE },
903 .info = DHD_INFO_NULL,
904 .net = DHD_NET_DEV_NULL,
907 #define DHD_IF_NULL (&dhd_if_null)
909 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
911 /** Interface STA list management. */
913 /** Fetch the dhd_if object, given the interface index in the dhd. */
914 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
916 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
917 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
918 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
920 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
921 static void dhd_if_del_sta_list(dhd_if_t * ifp);
922 static void dhd_if_flush_sta(dhd_if_t * ifp);
924 /* Construct/Destruct a sta pool. */
925 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
926 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
927 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
930 /* Return interface pointer */
931 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
933 ASSERT(ifidx < DHD_MAX_IFS);
935 if (ifidx >= DHD_MAX_IFS)
938 return dhdp->info->iflist[ifidx];
941 /** Reset a dhd_sta object and free into the dhd pool. */
943 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
947 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
949 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
950 id16_map_free(dhdp->staid_allocator, sta->idx);
951 for (prio = 0; prio < (int)NUMPRIO; prio++)
952 sta->flowid[prio] = FLOWID_INVALID;
953 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
954 sta->ifidx = DHD_BAD_IF;
955 bzero(sta->ea.octet, ETHER_ADDR_LEN);
956 INIT_LIST_HEAD(&sta->list);
957 sta->idx = ID16_INVALID; /* implying free */
960 /** Allocate a dhd_sta object from the dhd pool. */
962 dhd_sta_alloc(dhd_pub_t * dhdp)
966 dhd_sta_pool_t * sta_pool;
968 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
970 idx = id16_map_alloc(dhdp->staid_allocator);
971 if (idx == ID16_INVALID) {
972 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
976 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
977 sta = &sta_pool[idx];
979 ASSERT((sta->idx == ID16_INVALID) &&
980 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
981 sta->idx = idx; /* implying allocated */
986 /** Delete all STAs in an interface's STA list. */
988 dhd_if_del_sta_list(dhd_if_t *ifp)
990 dhd_sta_t *sta, *next;
993 DHD_IF_STA_LIST_LOCK(ifp, flags);
995 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
996 #if defined(BCM_GMAC3)
998 /* Remove sta from WOFA forwarder. */
999 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1001 #endif /* BCM_GMAC3 */
1002 list_del(&sta->list);
1003 dhd_sta_free(&ifp->info->pub, sta);
1006 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1011 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1013 dhd_if_flush_sta(dhd_if_t * ifp)
1015 #if defined(BCM_GMAC3)
1017 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1018 dhd_sta_t *sta, *next;
1019 unsigned long flags;
1021 DHD_IF_STA_LIST_LOCK(ifp, flags);
1023 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1024 /* Remove any sta entry from WOFA forwarder. */
1025 fwder_flush(ifp->fwdh, (wofa_t)sta);
1028 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1030 #endif /* BCM_GMAC3 */
1033 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1035 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1037 int idx, sta_pool_memsz;
1039 dhd_sta_pool_t * sta_pool;
1040 void * staid_allocator;
1042 ASSERT(dhdp != (dhd_pub_t *)NULL);
1043 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1045 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1046 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1047 if (staid_allocator == NULL) {
1048 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1052 /* Pre allocate a pool of dhd_sta objects (one extra). */
1053 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1054 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1055 if (sta_pool == NULL) {
1056 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1057 id16_map_fini(dhdp->osh, staid_allocator);
1061 dhdp->sta_pool = sta_pool;
1062 dhdp->staid_allocator = staid_allocator;
1064 /* Initialize all sta(s) for the pre-allocated free pool. */
1065 bzero((uchar *)sta_pool, sta_pool_memsz);
1066 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1067 sta = &sta_pool[idx];
1068 sta->idx = id16_map_alloc(staid_allocator);
1069 ASSERT(sta->idx <= max_sta);
1071 /* Now place them into the pre-allocated free pool. */
1072 for (idx = 1; idx <= max_sta; idx++) {
1073 sta = &sta_pool[idx];
1074 dhd_sta_free(dhdp, sta);
1080 /** Destruct the pool of dhd_sta_t objects.
1081 * Caller must ensure that no STA objects are currently associated with an if.
1084 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1086 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1090 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1091 for (idx = 1; idx <= max_sta; idx++) {
1092 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1093 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1095 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1096 dhdp->sta_pool = NULL;
1099 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1100 dhdp->staid_allocator = NULL;
1103 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1105 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1107 int idx, sta_pool_memsz;
1109 dhd_sta_pool_t * sta_pool;
1110 void *staid_allocator;
1113 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1117 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1118 staid_allocator = dhdp->staid_allocator;
1121 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1125 if (!staid_allocator) {
1126 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1130 /* clear free pool */
1131 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1132 bzero((uchar *)sta_pool, sta_pool_memsz);
1134 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1135 id16_map_clear(staid_allocator, max_sta, 1);
1137 /* Initialize all sta(s) for the pre-allocated free pool. */
1138 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1139 sta = &sta_pool[idx];
1140 sta->idx = id16_map_alloc(staid_allocator);
1141 ASSERT(sta->idx <= max_sta);
1143 /* Now place them into the pre-allocated free pool. */
1144 for (idx = 1; idx <= max_sta; idx++) {
1145 sta = &sta_pool[idx];
1146 dhd_sta_free(dhdp, sta);
1150 /** Find STA with MAC address ea in an interface's STA list. */
/* Linear search of ifp->sta_list under the per-interface STA list lock.
 * Returns the matching dhd_sta_t (unlock-then-return on hit, see below) or
 * DHD_STA_NULL when the interface is invalid or no entry matches.
 */
1152 dhd_find_sta(void *pub, int ifidx, void *ea)
1156 unsigned long flags;
1159 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1161 return DHD_STA_NULL;
1163 DHD_IF_STA_LIST_LOCK(ifp, flags);
1165 list_for_each_entry(sta, &ifp->sta_list, list) {
1166 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
/* Drop the lock before returning the found entry (return line elided here). */
1167 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1172 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1174 return DHD_STA_NULL;
1177 /** Add STA into the interface's STA list. */
/* Allocates a dhd_sta_t from the pool, records the peer MAC, links it to the
 * interface's sta_list under the list lock, and (BCM_GMAC3 builds) registers
 * it with the WOFA forwarder. Returns DHD_STA_NULL on bad ifidx or alloc
 * failure; otherwise returns the new entry (return line elided in this view).
 */
1179 dhd_add_sta(void *pub, int ifidx, void *ea)
1183 unsigned long flags;
1186 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1188 return DHD_STA_NULL;
1190 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1191 if (sta == DHD_STA_NULL) {
1192 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1193 return DHD_STA_NULL;
1196 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1198 /* link the sta and the dhd interface */
1201 INIT_LIST_HEAD(&sta->list);
1203 DHD_IF_STA_LIST_LOCK(ifp, flags);
1205 list_add_tail(&sta->list, &ifp->sta_list);
1207 #if defined(BCM_GMAC3)
/* fwder requires 16-bit aligned MAC for its uint16-wide lookups. */
1209 ASSERT(ISALIGNED(ea, 2));
1210 /* Add sta to WOFA forwarder. */
1211 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1213 #endif /* BCM_GMAC3 */
1215 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1220 /** Delete STA from the interface's STA list. */
/* Walks the list with the _safe iterator (entries are removed mid-walk),
 * deregisters matching entries from the WOFA forwarder (BCM_GMAC3), unlinks
 * them, and returns each to the STA pool via dhd_sta_free().
 */
1222 dhd_del_sta(void *pub, int ifidx, void *ea)
1224 dhd_sta_t *sta, *next;
1226 unsigned long flags;
1229 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1233 DHD_IF_STA_LIST_LOCK(ifp, flags);
1235 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1236 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1237 #if defined(BCM_GMAC3)
1238 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1239 ASSERT(ISALIGNED(ea, 2));
1240 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1242 #endif /* BCM_GMAC3 */
1243 list_del(&sta->list);
1244 dhd_sta_free(&ifp->info->pub, sta);
1248 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1253 /** Add STA if it doesn't exist. Not reentrant. */
/* Lookup-then-insert convenience wrapper: returns the existing entry when
 * found, otherwise creates one via dhd_add_sta(). Not reentrant: the find
 * and add are not performed under a single lock.
 */
1255 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1259 sta = dhd_find_sta(pub, ifidx, ea);
1263 sta = dhd_add_sta(pub, ifidx, ea);
/* No-op STA management stubs for builds without PCIE_FULL_DONGLE: the
 * firmware does not maintain per-interface STA state in that configuration,
 * so these keep callers link-clean without doing any work.
 */
1269 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
1270 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1271 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1272 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1273 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
1274 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1275 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1276 #endif /* PCIE_FULL_DONGLE */
1279 /* Returns the dhd iflist index corresponding to the bssidx provided by apps */
/* Scans dhd->iflist[0..DHD_MAX_IFS) for the interface whose bssidx matches;
 * the matched loop index i is the host-side iflist slot (return elided here).
 */
1280 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1283 dhd_info_t *dhd = dhdp->info;
1286 ASSERT(bssidx < DHD_MAX_IFS);
1289 for (i = 0; i < DHD_MAX_IFS; i++) {
1290 ifp = dhd->iflist[i];
1291 if (ifp && (ifp->bssidx == bssidx)) {
1292 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1293 ifp->name, bssidx, i));
/* Enqueue a received skb into the fixed-size circular rxf buffer
 * (dhdp->skbbuf, MAXSKBPEND entries, power-of-two masked indices) for the
 * rx-frame thread to consume. Fails when the store slot is still occupied,
 * i.e. the consumer has not kept up; behavior then depends on
 * RXF_DEQUEUE_ON_BUSY (trace vs. error — return paths elided in this view).
 */
1300 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1306 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1310 dhd_os_rxflock(dhdp);
1311 store_idx = dhdp->store_idx;
1312 sent_idx = dhdp->sent_idx;
1313 if (dhdp->skbbuf[store_idx] != NULL) {
1314 /* Make sure the previous packets are processed */
1315 dhd_os_rxfunlock(dhdp);
1316 #ifdef RXF_DEQUEUE_ON_BUSY
1317 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1318 skb, store_idx, sent_idx));
1320 #else /* RXF_DEQUEUE_ON_BUSY */
1321 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1322 skb, store_idx, sent_idx));
1323 /* removed msleep here, should use wait_event_timeout if we
1324 * want to give rx frame thread a chance to run
1326 #if defined(WAIT_DEQUEUE)
1330 #endif /* RXF_DEQUEUE_ON_BUSY */
1332 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1333 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
/* Publish the skb, then advance store_idx with wrap-around mask. */
1334 dhdp->skbbuf[store_idx] = skb;
1335 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1336 dhd_os_rxfunlock(dhdp);
/* Dequeue the next skb from the rxf circular buffer (consumer side of
 * dhd_rxf_enqueue). Returns the skb at sent_idx, clearing the slot and
 * advancing sent_idx with wrap-around; an empty slot is logged as an error
 * (NULL-return path elided in this view).
 */
1341 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1347 dhd_os_rxflock(dhdp);
1349 store_idx = dhdp->store_idx;
1350 sent_idx = dhdp->sent_idx;
1351 skb = dhdp->skbbuf[sent_idx];
1354 dhd_os_rxfunlock(dhdp);
1355 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1356 store_idx, sent_idx));
/* Clear the slot so the producer can reuse it, then advance the index. */
1360 dhdp->skbbuf[sent_idx] = NULL;
1361 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1363 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1366 dhd_os_rxfunlock(dhdp);
/* Pre/post-processing hook around firmware download: prepost TRUE reads the
 * platform MAC address, prepost FALSE writes it back to the dongle.
 * NOTE(review): `dhd` is declared only when CUSTOMER_HW10 is NOT defined,
 * yet it is used unconditionally below — presumably CUSTOMER_HW10 builds
 * compile this differently via elided lines; confirm before touching.
 */
1371 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1373 #ifndef CUSTOMER_HW10
1374 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1375 #endif /* !CUSTOMER_HW10 */
1377 if (prepost) { /* pre process */
1378 dhd_read_macaddr(dhd);
1379 } else { /* post process */
1380 dhd_write_macaddr(&dhd->pub.mac);
1386 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
/* Decide whether the ARP packet filter should be applied for the current
 * operating mode: TRUE for IBSS, and for P2P GC/GO when arp_version == 1
 * (return of _apply elided in this view).
 */
1388 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1390 bool _apply = FALSE;
1391 /* In case of IBSS mode, apply arp pkt filter */
1392 if (op_mode & DHD_FLAG_IBSS_MODE) {
1396 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1397 if ((dhd->arp_version == 1) &&
1398 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1406 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1408 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1409 #ifdef PKT_FILTER_SUPPORT
/* Parse the numeric filter mode from `command` (advancing the cursor) and
 * store it in pub.pkt_filter_mode; no validation of the parsed value here.
 */
1411 dhd_set_packet_filter_mode(struct net_device *dev, char *command)
1413 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1415 dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
/* Parse and apply a port-filter command string. Format: an action code
 * followed by a whitespace-separated port list. Actions: LOOPBACK echoes the
 * next value if the firmware advertises "pktfltr2" capability; CLEAR resets
 * the stored port list; ADD appends non-duplicate ports up to
 * WL_PKT_FILTER_PORTS_MAX; DEL rebuilds the list excluding listed ports.
 * The final count is written back to dhdp->pkt_filter_ports_count.
 */
1419 dhd_set_packet_filter_ports(struct net_device *dev, char *command)
1421 int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
1422 uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
1423 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1424 dhd_pub_t *dhdp = &dhdi->pub;
1425 char iovbuf[WLC_IOCTL_SMLEN];
/* First token of the command is the action code. */
1428 action = bcm_strtoul(command, &command, 0);
1429 if (action > PKT_FILTER_PORTS_MAX)
1432 if (action == PKT_FILTER_PORTS_LOOPBACK) {
1433 /* echo the loopback value if port filter is supported else error */
1434 bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
1435 error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
1437 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
1441 if (strstr(iovbuf, "pktfltr2"))
1442 return bcm_strtoul(command, &command, 0);
1444 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
1445 return BCME_UNSUPPORTED;
1449 if (action == PKT_FILTER_PORTS_CLEAR) {
1450 /* action 0 is clear all ports */
1451 dhdp->pkt_filter_ports_count = 0;
1452 bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
1455 portnum = bcm_strtoul(command, &command, 0);
1457 /* no ports to add or remove */
1461 /* get configured ports */
1462 count = dhdp->pkt_filter_ports_count;
1463 ports = dhdp->pkt_filter_ports;
1465 if (action == PKT_FILTER_PORTS_ADD) {
1466 /* action 1 is add ports */
1468 /* copy new ports */
/* Skip duplicates: the inner loop breaks on a match (break elided here),
 * so the append runs only when the scanned port differs. */
1469 while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
1470 for (i = 0; i < count; i++) {
1471 /* duplicate port */
1472 if (portnum == ports[i])
1475 if (portnum != ports[i])
1476 ports[count++] = portnum;
1477 portnum = bcm_strtoul(command, &command, 0);
1479 } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
1480 /* action 2 is remove ports */
/* Work on a snapshot (get_ports) and compact survivors back into ports
 * for each port named in the command. */
1481 bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
1484 while (portnum != 0) {
1486 for (i = 0; i < get_count; i++) {
1487 if (portnum != get_ports[i])
1488 ports[count++] = get_ports[i];
1491 bcopy(ports, get_ports, count * sizeof(uint16));
1492 portnum = bcm_strtoul(command, &command, 0);
1495 dhdp->pkt_filter_ports_count = count;
/* Push the host-side port filter list and mode down to the firmware.
 * enable TRUE: set PORTS_ONLY in dhd_master_mode, choose FORWARD_ON_MATCH
 * (whitelist) vs DISCARD_ON_MATCH (blacklist) per pkt_filter_mode, and send
 * the stored port list. enable FALSE: clear the list and restore forwarding.
 * Issues two iovars: "pkt_filter_ports" then "pkt_filter_mode".
 */
1501 dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
1504 wl_pkt_filter_ports_t *portlist = NULL;
/* Buffer sized for iovar name + fixed header + maximum port payload. */
1505 const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
1506 + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
1507 char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
1508 char iovbuf[pkt_filter_ports_buf_len];
1510 DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
1511 enable, dhd->in_suspend, dhd->pkt_filter_mode,
1512 dhd->pkt_filter_ports_count));
1514 bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
1515 portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
1516 portlist->version = WL_PKT_FILTER_PORTS_VERSION;
1517 portlist->reserved = 0;
1520 if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
1523 /* enable port filter */
1524 dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
1525 if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
1526 /* whitelist mode: FORWARD_ON_MATCH */
1527 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1529 /* blacklist mode: DISCARD_ON_MATCH */
1530 dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;
1532 portlist->count = dhd->pkt_filter_ports_count;
1533 bcopy(dhd->pkt_filter_ports, portlist->ports,
1534 dhd->pkt_filter_ports_count * sizeof(uint16));
1536 /* disable port filter */
1537 portlist->count = 0;
1538 dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
1539 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1542 DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
1546 bcm_mkiovar("pkt_filter_ports",
1548 (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
1549 iovbuf, sizeof(iovbuf));
1550 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1552 DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));
1555 bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
1556 sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
1557 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1559 DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));
1563 #endif /* PKT_FILTER_SUPPORT */
1564 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
/* Install every configured packet filter pattern (dhd->pktfilter[]) into the
 * firmware via dhd_pktfilter_offload_set(); no-op unless the global
 * dhd_pkt_filter_enable flag is set.
 */
1566 void dhd_set_packet_filter(dhd_pub_t *dhd)
1568 #ifdef PKT_FILTER_SUPPORT
1571 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1572 if (dhd_pkt_filter_enable) {
1573 for (i = 0; i < dhd->pktfilter_count; i++) {
1574 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1577 #endif /* PKT_FILTER_SUPPORT */
/* Enable (value=1) or disable (value=0) all configured packet filters in the
 * firmware. Enabling is skipped during DHCP (dhcp_in_progress) and outside
 * STA mode; the ARP filter slot is additionally gated by
 * _turn_on_arp_filter() so ARP still reaches the host in IBSS/P2P modes.
 */
1580 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1582 #ifdef PKT_FILTER_SUPPORT
1585 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
1587 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1588 dhd_enable_packet_filter_ports(dhd, value);
1589 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1591 /* 1 - Enable packet filter, only allow unicast packet to send up */
1592 /* 0 - Disable packet filter */
1593 if (dhd_pkt_filter_enable && (!value ||
1594 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
1596 for (i = 0; i < dhd->pktfilter_count; i++) {
1597 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
1598 if (value && (i == DHD_ARP_FILTER_NUM) &&
1599 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1600 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1601 "val %d, cnt %d, op_mode 0x%x\n",
1602 value, i, dhd->op_mode));
1605 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1606 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1607 value, dhd_master_mode);
1610 #endif /* PKT_FILTER_SUPPORT */
/* Apply or revert power-save tuning when the host suspends/resumes.
 * value TRUE + in_suspend: force PM_MAX (unless SUPPORT_PM2_ONLY), enable
 * packet filters, raise bcn_li_dtim, disable roaming, enable the IPv6 RA
 * filter (if the firmware supports ndoe). Otherwise the mirror-image resume
 * path restores PM_FAST, disables filters, and resets dtim/roam/RA settings.
 * All firmware writes go through bcm_mkiovar + dhd_wl_ioctl_cmd under the
 * suspend lock.
 */
1613 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1615 #ifndef SUPPORT_PM2_ONLY
1616 int power_mode = PM_MAX;
1617 #endif /* SUPPORT_PM2_ONLY */
1618 /* wl_pkt_filter_enable_t enable_parm; */
1620 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1621 uint roamvar = dhd->conf->roam_off_suspend;
1622 uint nd_ra_filter = 0;
1628 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1629 __FUNCTION__, value, dhd->in_suspend));
1631 dhd_suspend_lock(dhd);
1633 #ifdef CUSTOM_SET_CPUCORE
1634 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1635 /* set specific cpucore */
1636 dhd_set_cpucore(dhd, TRUE);
1637 #endif /* CUSTOM_SET_CPUCORE */
1639 if (value && dhd->in_suspend) {
1640 #ifdef PKT_FILTER_SUPPORT
1641 dhd->early_suspended = 1;
1643 /* Kernel suspended */
1644 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
1646 #ifndef SUPPORT_PM2_ONLY
1647 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1648 sizeof(power_mode), TRUE, 0);
1649 #endif /* SUPPORT_PM2_ONLY */
1651 /* Enable packet filter, only allow unicast packet to send up */
1652 dhd_enable_packet_filter(1, dhd);
1654 /* If DTIM skip is set up as default, force it to wake
1655 * each third DTIM for better power savings. Note that
1656 * one side effect is a chance to miss BC/MC packet.
1658 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
1659 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1660 4, iovbuf, sizeof(iovbuf));
1661 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
1663 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
1665 /* Disable firmware roaming during suspend */
1666 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1667 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1668 if (FW_SUPPORTED(dhd, ndoe)) {
1669 /* enable IPv6 RA filter in firmware during suspend */
1671 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1672 iovbuf, sizeof(iovbuf));
1673 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1674 sizeof(iovbuf), TRUE, 0)) < 0)
1675 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1679 #ifdef PKT_FILTER_SUPPORT
1680 dhd->early_suspended = 0;
1682 /* Kernel resumed */
1683 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
1685 #ifndef SUPPORT_PM2_ONLY
1686 power_mode = PM_FAST;
1687 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1688 sizeof(power_mode), TRUE, 0);
1689 #endif /* SUPPORT_PM2_ONLY */
1690 #ifdef PKT_FILTER_SUPPORT
1691 /* disable pkt filter */
1692 dhd_enable_packet_filter(0, dhd);
1693 #endif /* PKT_FILTER_SUPPORT */
1695 /* restore pre-suspend setting for dtim_skip */
1696 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1697 4, iovbuf, sizeof(iovbuf));
1699 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1700 roamvar = dhd_roam_disable;
1701 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1702 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1703 if (FW_SUPPORTED(dhd, ndoe)) {
1704 /* disable IPv6 RA filter in firmware during suspend */
1706 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1707 iovbuf, sizeof(iovbuf));
1708 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1709 sizeof(iovbuf), TRUE, 0)) < 0)
1710 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1715 dhd_suspend_unlock(dhd);
/* Common entry for early-suspend/late-resume: records in_suspend=val and,
 * unless suspend handling is disabled (and not forced) or the device is not
 * in STA mode, delegates to dhd_set_suspend(). Runs under wake lock + perim
 * lock to keep the bus awake for the ioctls.
 */
1720 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1722 dhd_pub_t *dhdp = &dhd->pub;
1725 DHD_OS_WAKE_LOCK(dhdp);
1726 DHD_PERIM_LOCK(dhdp);
1728 /* Set flag when early suspend was called */
1729 dhdp->in_suspend = val;
1730 if ((force || !dhdp->suspend_disable_flag) &&
1731 dhd_support_sta_mode(dhdp))
1733 ret = dhd_set_suspend(val, dhdp);
1736 DHD_PERIM_UNLOCK(dhdp);
1737 DHD_OS_WAKE_UNLOCK(dhdp);
1741 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
/* Android early-suspend callback: recover dhd_info from the embedded
 * early_suspend handle and enter suspend tuning (val=1, not forced). */
1742 static void dhd_early_suspend(struct early_suspend *h)
1744 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1745 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1748 dhd_suspend_resume_helper(dhd, 1, 0);
/* Android late-resume callback: mirror of dhd_early_suspend (val=0). */
1751 static void dhd_late_resume(struct early_suspend *h)
1753 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1754 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1757 dhd_suspend_resume_helper(dhd, 0, 0);
1759 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1762 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1763 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1765 * dhd_timeout_start(&tmo, usec);
1766 * while (!dhd_timeout_expired(&tmo))
1767 * if (poll_something())
1769 * if (dhd_timeout_expired(&tmo))
/* Initialize a dhd_timeout_t for `usec` microseconds; tick caches the length
 * of one jiffy in usecs as the spin/sleep crossover (other field
 * initializations elided in this view). */
1774 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1779 tmo->tick = jiffies_to_usecs(1);
/* Poll-with-backoff step for the dhd_timeout_t mechanism (see comment above):
 * first call is free; afterwards each call charges the upcoming delay to
 * `elapsed`, then either spins via OSL_DELAY with a doubling increment
 * (while atomic context or sub-jiffy), or sleeps one jiffy on a private
 * waitqueue. Returns whether limit was exceeded (return lines elided).
 */
1783 dhd_timeout_expired(dhd_timeout_t *tmo)
1785 /* Does nothing the first call */
1786 if (tmo->increment == 0) {
1791 if (tmo->elapsed >= tmo->limit)
1794 /* Add the delay that's about to take place */
1795 tmo->elapsed += tmo->increment;
1797 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
1798 OSL_DELAY(tmo->increment);
/* Exponential backoff, capped at one jiffy (the sleep threshold). */
1799 tmo->increment *= 2;
1800 if (tmo->increment > tmo->tick)
1801 tmo->increment = tmo->tick;
/* Sleepable context: block for one jiffy on a throwaway waitqueue. */
1803 wait_queue_head_t delay_wait;
1804 DECLARE_WAITQUEUE(wait, current);
1805 init_waitqueue_head(&delay_wait);
1806 add_wait_queue(&delay_wait, &wait);
1807 set_current_state(TASK_INTERRUPTIBLE);
1808 (void)schedule_timeout(1);
1809 remove_wait_queue(&delay_wait, &wait);
1810 set_current_state(TASK_RUNNING);
/* Map a net_device back to its iflist index; DHD_BAD_IF when not found or
 * dhd is invalid (increment/return lines elided in this view). */
1817 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1822 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
1825 while (i < DHD_MAX_IFS) {
1826 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
/* Inverse of dhd_net2idx: return the net_device registered at iflist[ifidx],
 * or NULL for an invalid pub/index/slot (NULL return elided in this view). */
1834 struct net_device * dhd_idx2net(void *pub, int ifidx)
1836 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1837 struct dhd_info *dhd_info;
1839 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1841 dhd_info = dhd_pub->info;
1842 if (dhd_info && dhd_info->iflist[ifidx])
1843 return dhd_info->iflist[ifidx]->net;
/* Resolve an interface name to its iflist index, scanning downward from
 * DHD_MAX_IFS; falls through to index 0 (primary interface) when the name is
 * empty, NULL, or unmatched (decrement loop partially elided in this view). */
1848 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1850 int i = DHD_MAX_IFS;
1854 if (name == NULL || *name == '\0')
1858 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1861 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1863 return i; /* default - the primary interface */
/* Map a firmware interface index (ifp->idx) to the host-side iflist slot,
 * scanning downward; defaults to 0 (primary) when unmatched. */
1867 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1869 int i = DHD_MAX_IFS;
1874 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1877 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1879 return i; /* default - the primary interface */
/* Return the netdev name for iflist[ifidx], used mainly for log prefixes.
 * Out-of-range index, empty slot, or missing net each log an error (fallback
 * string returns elided in this view). */
1883 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1885 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1889 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1890 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1894 if (dhd->iflist[ifidx] == NULL) {
1895 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1899 if (dhd->iflist[ifidx]->net)
1900 return dhd->iflist[ifidx]->net->name;
/* Return the MAC address of the interface whose bssidx matches idx.
 * NOTE(review): casts dhdp (dhd_pub_t*) directly to dhd_info_t*, unlike
 * sibling functions which use dhdp->info (e.g. dhd_ifname above) — this
 * looks wrong unless the structs intentionally alias; verify before use. */
1906 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1909 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1912 for (i = 0; i < DHD_MAX_IFS; i++)
1913 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1914 return dhd->iflist[i]->mac_addr;
/* Synchronize the kernel's multicast configuration for iflist[ifidx] with
 * the dongle. Three firmware writes in sequence:
 *   1. "mcast_list" iovar — the netdev's multicast address list, gathered
 *      under netif_addr_lock_bh (netdev_hw_addr API on >= 2.6.35, legacy
 *      dev_mc_list before);
 *   2. "allmulti" iovar — from IFF_ALLMULTI, forced TRUE if the list set
 *      failed with a non-empty list;
 *   3. WLC_SET_PROMISC ioctl — from IFF_PROMISC (reuses the allmulti var).
 * Buffers are MALLOC'd per step and MFREE'd before the next.
 */
1921 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
1923 struct net_device *dev;
1924 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1925 struct netdev_hw_addr *ha;
1927 struct dev_mc_list *mclist;
1929 uint32 allmulti, cnt;
1936 ASSERT(dhd && dhd->iflist[ifidx]);
1937 dev = dhd->iflist[ifidx]->net;
/* Snapshot the multicast count under the address lock. */
1940 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1941 netif_addr_lock_bh(dev);
1943 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1944 cnt = netdev_mc_count(dev);
1946 cnt = dev->mc_count;
1947 #endif /* LINUX_VERSION_CODE */
1949 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1950 netif_addr_unlock_bh(dev);
1953 /* Determine initial value of allmulti flag */
1954 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
1956 /* Send down the multicast list first. */
/* Buffer layout: "mcast_list\0" + uint32 count + cnt MAC addresses. */
1959 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
1960 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
1961 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
1962 dhd_ifname(&dhd->pub, ifidx), cnt));
1966 strncpy(bufp, "mcast_list", buflen - 1);
1967 bufp[buflen - 1] = '\0';
1968 bufp += strlen("mcast_list") + 1;
1971 memcpy(bufp, &cnt, sizeof(cnt));
1972 bufp += sizeof(cnt);
/* Re-take the lock and copy at most cnt addresses (list may have changed
 * since the count snapshot; cnt bounds the copy). */
1975 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1976 netif_addr_lock_bh(dev);
1978 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1979 netdev_for_each_mc_addr(ha, dev) {
1982 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
1983 bufp += ETHER_ADDR_LEN;
1987 for (mclist = dev->mc_list; (mclist && (cnt > 0));
1988 cnt--, mclist = mclist->next) {
1989 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
1990 bufp += ETHER_ADDR_LEN;
1992 #endif /* LINUX_VERSION_CODE */
1994 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1995 netif_addr_unlock_bh(dev);
1998 memset(&ioc, 0, sizeof(ioc));
1999 ioc.cmd = WLC_SET_VAR;
2004 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2006 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2007 dhd_ifname(&dhd->pub, ifidx), cnt));
/* Fall back to allmulti if the dongle rejected a non-empty list. */
2008 allmulti = cnt ? TRUE : allmulti;
2011 MFREE(dhd->pub.osh, buf, buflen);
2013 /* Now send the allmulti setting. This is based on the setting in the
2014 * net_device flags, but might be modified above to be turned on if we
2015 * were trying to set some addresses and dongle rejected it...
2018 buflen = sizeof("allmulti") + sizeof(allmulti);
2019 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
2020 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
2023 allmulti = htol32(allmulti);
2025 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
2026 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
2027 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
2028 MFREE(dhd->pub.osh, buf, buflen);
2033 memset(&ioc, 0, sizeof(ioc));
2034 ioc.cmd = WLC_SET_VAR;
2039 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2041 DHD_ERROR(("%s: set allmulti %d failed\n",
2042 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2045 MFREE(dhd->pub.osh, buf, buflen);
2047 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
/* `allmulti` variable is reused here to carry the promisc value. */
2049 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2051 allmulti = htol32(allmulti);
2053 memset(&ioc, 0, sizeof(ioc));
2054 ioc.cmd = WLC_SET_PROMISC;
2055 ioc.buf = &allmulti;
2056 ioc.len = sizeof(allmulti);
2059 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2061 DHD_ERROR(("%s: set promisc %d failed\n",
2062 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
/* Push a new MAC address for iflist[ifidx] to the dongle via the
 * "cur_etheraddr" iovar; on success mirror it into the netdev's dev_addr
 * and the primary MAC record (dhd->pub.mac). */
2067 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2073 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
2074 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
2077 memset(&ioc, 0, sizeof(ioc));
2078 ioc.cmd = WLC_SET_VAR;
2083 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2085 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2087 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2089 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2096 extern struct net_device *ap_net_dev;
2097 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
/* Deferred-work handler for firmware IF_ADD events: allocates the new
 * dhd interface and its net_device, wires up a cfg80211 wireless_dev on
 * kernels >= 3.11 (wiphy borrowed from the primary interface), registers the
 * netdev (perim lock dropped around dhd_register_if since it can sleep),
 * and for PCIE full-dongle AP roles enables firmware ap_isolate. Frees the
 * event payload and unwinds on every failure path (some gotos elided).
 * Runs under net_if lock + wake lock + perim lock.
 */
2101 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2103 dhd_info_t *dhd = handle;
2104 dhd_if_event_t *if_event = event_info;
2105 struct net_device *ndev;
2108 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2109 struct wireless_dev *vwdev, *primary_wdev;
2110 struct net_device *primary_ndev;
2111 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2113 if (event != DHD_WQ_WORK_IF_ADD) {
2114 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2119 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2124 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2128 dhd_net_if_lock_local(dhd);
2129 DHD_OS_WAKE_LOCK(&dhd->pub);
2130 DHD_PERIM_LOCK(&dhd->pub);
2132 ifidx = if_event->event.ifidx;
2133 bssidx = if_event->event.bssidx;
2134 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2136 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2137 if_event->mac, bssidx, TRUE);
2139 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2143 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2144 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
2145 if (unlikely(!vwdev)) {
2146 WL_ERR(("Could not allocate wireless device\n"));
/* Share the primary interface's wiphy for the virtual interface. */
2149 primary_ndev = dhd->pub.info->iflist[0]->net;
2150 primary_wdev = ndev_to_wdev(primary_ndev);
2151 vwdev->wiphy = primary_wdev->wiphy;
2152 vwdev->iftype = if_event->event.role;
2153 vwdev->netdev = ndev;
2154 ndev->ieee80211_ptr = vwdev;
2155 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
2156 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
2157 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
/* register_netdev can sleep; release the perimeter lock around it. */
2159 DHD_PERIM_UNLOCK(&dhd->pub);
2160 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2161 DHD_PERIM_LOCK(&dhd->pub);
2162 if (ret != BCME_OK) {
2163 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2164 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2167 #ifdef PCIE_FULL_DONGLE
2168 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2169 if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
2170 char iovbuf[WLC_IOCTL_SMLEN];
2173 memset(iovbuf, 0, sizeof(iovbuf));
2174 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
2175 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
2177 if (ret != BCME_OK) {
2178 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2179 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2182 #endif /* PCIE_FULL_DONGLE */
/* Event payload ownership transferred to this handler; free it here. */
2184 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2186 DHD_PERIM_UNLOCK(&dhd->pub);
2187 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2188 dhd_net_if_unlock_local(dhd);
/* Deferred-work handler for firmware IF_DEL events: validates the event and
 * payload, removes the interface via dhd_remove_if(), and frees the event
 * payload. Same lock nesting as dhd_ifadd_event_handler. */
2192 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2194 dhd_info_t *dhd = handle;
2196 dhd_if_event_t *if_event = event_info;
2199 if (event != DHD_WQ_WORK_IF_DEL) {
2200 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2205 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2210 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2214 dhd_net_if_lock_local(dhd);
2215 DHD_OS_WAKE_LOCK(&dhd->pub);
2216 DHD_PERIM_LOCK(&dhd->pub);
2218 ifidx = if_event->event.ifidx;
2219 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2221 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2223 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2225 DHD_PERIM_UNLOCK(&dhd->pub);
2226 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2227 dhd_net_if_unlock_local(dhd);
/* Deferred-work handler for DHD_WQ_WORK_SET_MAC: applies ifp->mac_addr via
 * _dhd_set_mac_address(). Refuses while any interface is in AP mode
 * (ap_net_dev set, checked under the general lock) or when the driver is
 * down. */
2231 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2233 dhd_info_t *dhd = handle;
2234 dhd_if_t *ifp = event_info;
2236 if (event != DHD_WQ_WORK_SET_MAC) {
2237 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2241 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2245 dhd_net_if_lock_local(dhd);
2246 DHD_OS_WAKE_LOCK(&dhd->pub);
2247 DHD_PERIM_LOCK(&dhd->pub);
2251 unsigned long flags;
2253 DHD_GENERAL_LOCK(&dhd->pub, flags);
2254 in_ap = (ap_net_dev != NULL);
2255 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2258 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2265 if (ifp == NULL || !dhd->pub.up) {
2266 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
/* NOTE(review): this DHD_ERROR fires before the actual set below and
 * duplicates the success message at line 2273 — looks like stray debug
 * output; the guarding condition (if any) is elided in this view. */
2270 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
2271 ifp->set_macaddress = FALSE;
2272 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2273 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2275 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2278 DHD_PERIM_UNLOCK(&dhd->pub);
2279 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2280 dhd_net_if_unlock_local(dhd);
/* Deferred-work handler for DHD_WQ_WORK_SET_MCAST_LIST: replays the pending
 * multicast configuration via _dhd_set_multicast_list(). Blocked while in AP
 * mode (clears ifp->set_multicast instead) or when the driver is down. */
2284 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
2286 dhd_info_t *dhd = handle;
2287 dhd_if_t *ifp = event_info;
2290 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
2291 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2296 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2300 dhd_net_if_lock_local(dhd);
2301 DHD_OS_WAKE_LOCK(&dhd->pub);
2302 DHD_PERIM_LOCK(&dhd->pub);
2307 unsigned long flags;
2308 DHD_GENERAL_LOCK(&dhd->pub, flags);
2309 in_ap = (ap_net_dev != NULL);
2310 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2313 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
2315 ifp->set_multicast = FALSE;
2321 if (ifp == NULL || !dhd->pub.up) {
2322 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2329 _dhd_set_multicast_list(dhd, ifidx);
2330 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
2333 DHD_PERIM_UNLOCK(&dhd->pub);
2334 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2335 dhd_net_if_unlock_local(dhd);
/* net_device_ops .ndo_set_mac_address: stash the requested MAC on the
 * matching dhd_if and defer the actual firmware update to
 * dhd_set_mac_addr_handler on the deferred workqueue (can't do firmware
 * ioctls in this context). */
2339 dhd_set_mac_address(struct net_device *dev, void *addr)
2343 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2344 struct sockaddr *sa = (struct sockaddr *)addr;
2348 ifidx = dhd_net2idx(dhd, dev);
2349 if (ifidx == DHD_BAD_IF)
2352 dhdif = dhd->iflist[ifidx];
2354 dhd_net_if_lock_local(dhd);
2355 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2356 dhdif->set_macaddress = TRUE;
2357 dhd_net_if_unlock_local(dhd);
2358 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2359 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
/* net_device_ops .ndo_set_rx_mode: mark the interface's multicast state
 * dirty and defer the firmware sync to dhd_set_mcast_list_handler. */
2364 dhd_set_multicast_list(struct net_device *dev)
2366 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2369 ifidx = dhd_net2idx(dhd, dev);
2370 if (ifidx == DHD_BAD_IF)
2373 dhd->iflist[ifidx]->set_multicast = TRUE;
2374 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2375 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2378 #ifdef PROP_TXSTATUS
/* Acquire the wlfc (wireless flow control) BH-disabling spinlock. */
2380 dhd_os_wlfc_block(dhd_pub_t *pub)
2382 dhd_info_t *di = (dhd_info_t *)(pub->info);
2384 spin_lock_bh(&di->wlfc_spinlock);
/* Release the wlfc spinlock taken by dhd_os_wlfc_block(). */
2389 dhd_os_wlfc_unblock(dhd_pub_t *pub)
2391 dhd_info_t *di = (dhd_info_t *)(pub->info);
2394 spin_unlock_bh(&di->wlfc_spinlock);
2398 #endif /* PROP_TXSTATUS */
/* Ethertype -> printable-name lookup table used by the packet-dump helpers;
 * the terminating/unknown entry and closing brace are elided in this view. */
2406 static const PKTTYPE_INFO packet_type_info[] =
2408 { ETHER_TYPE_IP, "IP" },
2409 { ETHER_TYPE_ARP, "ARP" },
2410 { ETHER_TYPE_BRCM, "BRCM" },
2411 { ETHER_TYPE_802_1X, "802.1X" },
2412 { ETHER_TYPE_WAI, "WAPI" },
/* Map an ethertype to its display string; unmatched types return the last
 * table entry (the catch-all). Note: sizeof(...)/sizeof(...[1]) is a
 * harmless but unconventional spelling of the element-count idiom
 * (conventionally index [0]). */
2416 static const char *_get_packet_type_str(uint16 type)
2419 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2421 for (i = 0; i < n; i++) {
2422 if (packet_type_info[i].type == type)
2423 return packet_type_info[i].str;
2426 return packet_type_info[n].str;
2428 #endif /* DHD_RX_DUMP */
2430 #if defined(DHD_8021X_DUMP)
/* Debug-only TX dump: logs the ethertype name of an outgoing packet, the
 * EAPOL header fields for 802.1X frames, and (with DHD_TX_FULL_DUMP) a full
 * hex dump of the payload minus the 4-byte trailer. */
2432 dhd_tx_dump(osl_t *osh, void *pkt)
2437 dump_data = PKTDATA(osh, pkt);
/* Ethertype is at offset 12/13 of the Ethernet header (big-endian). */
2438 protocol = (dump_data[12] << 8) | dump_data[13];
2440 DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));
2442 if (protocol == ETHER_TYPE_802_1X) {
2443 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2444 dump_data[14], dump_data[15], dump_data[30]));
2447 #if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
2451 datalen = PKTLEN(osh, pkt);
2453 for (i = 0; i < (datalen - 4); i++) {
2454 DHD_ERROR(("%02X ", dump_data[i]));
2460 #endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
2462 #endif /* DHD_8021X_DUMP */
/* Transmit-path core: validate bus state, massage the packet (DHCP unicast
 * conversion, multicast/802.1X accounting, priority tagging, PCIe flowring
 * assignment, PROP_TXSTATUS tagging), push the protocol header, and hand the
 * buffer to the bus layer (directly or through wlfc). On every early-exit
 * error path the packet is freed here since the caller will not.
 */
2465 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
2468 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2469 struct ether_header *eh = NULL;
2471 /* Reject if down */
2472 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
2473 /* free the packet here since the caller won't */
2474 PKTFREE(dhdp->osh, pktbuf, TRUE);
2478 #ifdef PCIE_FULL_DONGLE
2479 if (dhdp->busstate == DHD_BUS_SUSPEND) {
2480 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
2481 PKTFREE(dhdp->osh, pktbuf, TRUE);
2484 #endif /* PCIE_FULL_DONGLE */
2486 #ifdef DHD_UNICAST_DHCP
2487 /* if dhcp_unicast is enabled, we need to convert the */
2488 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2489 if (dhdp->dhcp_unicast) {
2490 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
2492 #endif /* DHD_UNICAST_DHCP */
2493 /* Update multicast statistic */
2494 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
2495 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
2496 eh = (struct ether_header *)pktdata;
2498 if (ETHER_ISMULTI(eh->ether_dhost))
2499 dhdp->tx_multicast++;
/* Track in-flight EAPOL frames; drained via atomic_dec on completion. */
2500 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
2501 atomic_inc(&dhd->pend_8021x_cnt);
2503 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2507 /* Look into the packet and update the packet priority */
2508 #ifndef PKTPRIO_OVERRIDE
2509 if (PKTPRIO(pktbuf) == 0)
2511 pktsetprio(pktbuf, FALSE);
2514 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
2516 * Lkup the per interface hash table, for a matching flowring. If one is not
2517 * available, allocate a unique flowid and add a flowring entry.
2518 * The found or newly created flowid is placed into the pktbuf's tag.
2520 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
2521 if (ret != BCME_OK) {
2522 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
2527 #ifdef PROP_TXSTATUS
2528 if (dhd_wlfc_is_supported(dhdp)) {
2529 /* store the interface ID */
2530 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
2532 /* store destination MAC in the tag as well */
2533 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
2535 /* decide which FIFO this packet belongs to */
2536 if (ETHER_ISMULTI(eh->ether_dhost))
2537 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2538 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
2540 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
2542 #endif /* PROP_TXSTATUS */
2543 /* If the protocol uses a data header, apply it */
2544 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
2546 /* Use bus module to send data frame */
2548 dhd_htsf_addtxts(dhdp, pktbuf);
2550 #if defined(DHD_8021X_DUMP)
2551 dhd_tx_dump(dhdp->osh, pktbuf);
2553 #ifdef PROP_TXSTATUS
/* wlfc path; falls back to raw bus txdata if wlfc is not active
 * (BCMPCIE bus txdata takes no ifidx argument). */
2555 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
2556 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
2557 /* non-proptxstatus way */
2559 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2561 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2562 #endif /* BCMPCIE */
2567 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2569 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2570 #endif /* BCMPCIE */
2571 #endif /* PROP_TXSTATUS */
/*
 * dhd_start_xmit() - Linux ndo_start_xmit hook: transmit one skb.
 * Takes the OS wake lock and perimeter lock, validates bus/interface state,
 * re-aligns odd skb->data addresses, guarantees headroom for the protocol
 * header, converts the skb to a native packet, runs optional WMF multicast /
 * IGMP-unicast conversion and TCP-ACK suppression, then calls dhd_sendpkt().
 * Always consumes the skb and (on visible paths) returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY when the bus is down / ifidx is bad.
 * NOTE(review): sampled/incomplete excerpt -- braces, gotos, and the
 * "done" epilogue label are not visible.
 */
2577 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
2582 dhd_info_t *dhd = DHD_DEV_INFO(net);
2583 dhd_if_t *ifp = NULL;
/* The two htsfdlystat_sz initializers are the WLMEDIA_HTSF vs non-HTSF
 * variants; the selecting #ifdef lines are missing from this view. */
2586 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
2588 uint8 htsfdlystat_sz = 0;
2591 struct ether_header *eh;
2593 #endif /* DHD_WMF */
2595 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2597 DHD_OS_WAKE_LOCK(&dhd->pub);
2598 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2600 /* Reject if down */
2601 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
2602 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2603 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
2604 netif_stop_queue(net);
2605 /* Send Event when bus down detected during data session */
2607 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
2608 net_os_send_hang_message(net);
2610 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2611 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2612 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2615 return NETDEV_TX_BUSY;
2619 ifp = DHD_DEV_IFP(net);
2620 ifidx = DHD_DEV_IFIDX(net);
2622 ASSERT(ifidx == dhd_net2idx(dhd, net));
2623 ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
2625 if (ifidx == DHD_BAD_IF) {
2626 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
2627 netif_stop_queue(net);
2628 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2629 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2630 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2633 return NETDEV_TX_BUSY;
2637 /* re-align socket buffer if "skb->data" is odd address */
2638 if (((unsigned long)(skb->data)) & 0x1) {
2639 unsigned char *data = skb->data;
2640 uint32 length = skb->len;
2641 PKTPUSH(dhd->pub.osh, skb, 1);
2642 memmove(skb->data, data, length);
2643 PKTSETLEN(dhd->pub.osh, skb, length);
2646 datalen = PKTLEN(dhd->pub.osh, skb);
2648 /* Make sure there's enough room for any header */
2650 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
2651 struct sk_buff *skb2;
2653 DHD_INFO(("%s: insufficient headroom\n",
2654 dhd_ifname(&dhd->pub, ifidx)));
2655 dhd->pub.tx_realloc++;
2657 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
2660 if ((skb = skb2) == NULL) {
2661 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2662 dhd_ifname(&dhd->pub, ifidx)));
2668 /* Convert to packet */
2669 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
2670 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2671 dhd_ifname(&dhd->pub, ifidx)));
2672 dev_kfree_skb_any(skb);
/* HTSF delay-stats path: retag unicast IPv4 frames so the dongle can
 * timestamp them (only when htsfdlystat_sz is non-zero). */
2677 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
2678 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
2679 struct ether_header *eh = (struct ether_header *)pktdata;
2681 if (!ETHER_ISMULTI(eh->ether_dhost) &&
2682 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
2683 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
2688 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
2689 iph = (uint8 *)eh + ETHER_HDR_LEN;
2691 /* WMF processing for multicast packets
2692 * Only IPv4 packets are handled
2694 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
2695 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
2696 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
2697 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2699 bool ucast_convert = FALSE;
2700 #ifdef DHD_UCAST_UPNP
2703 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
2704 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
2705 #endif /* DHD_UCAST_UPNP */
2706 #ifdef DHD_IGMP_UCQUERY
2707 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
2708 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
2709 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
2710 #endif /* DHD_IGMP_UCQUERY */
2711 if (ucast_convert) {
2713 unsigned long flags;
2715 DHD_IF_STA_LIST_LOCK(ifp, flags);
2717 /* Convert upnp/igmp query to unicast for each assoc STA */
2718 list_for_each_entry(sta, &ifp->sta_list, list) {
2719 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
2720 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2721 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2722 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2725 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
2728 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2729 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2730 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Original consumed by per-STA clones above; free it and finish. */
2732 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2733 return NETDEV_TX_OK;
2735 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2737 /* There will be no STA info if the packet is coming from LAN host
2740 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
2744 /* Either taken by WMF or we should drop it.
2747 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2748 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2749 return NETDEV_TX_OK;
2751 /* Continue the transmit path */
2756 #endif /* DHD_WMF */
2758 #ifdef DHDTCPACK_SUPPRESS
2759 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
2760 /* If this packet has been hold or got freed, just return */
2761 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx))
2764 /* If this packet has replaced another packet and got freed, just return */
2765 if (dhd_tcpack_suppress(&dhd->pub, pktbuf))
2768 #endif /* DHDTCPACK_SUPPRESS */
2770 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
/* Failure path: count drop on both per-if and global counters. */
2774 ifp->stats.tx_dropped++;
2775 dhd->pub.tx_dropped++;
2779 #ifdef PROP_TXSTATUS
2780 /* tx_packets counter can counted only when wlfc is disabled */
2781 if (!dhd_wlfc_is_supported(&dhd->pub))
2784 dhd->pub.tx_packets++;
2785 ifp->stats.tx_packets++;
2786 ifp->stats.tx_bytes += datalen;
2790 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2791 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2793 /* Return ok: we always eat the packet */
2794 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2797 return NETDEV_TX_OK;
/*
 * dhd_txflowcontrol() - assert/deassert TX flow control toward the stack.
 * state TRUE stops the netdev queue(s), FALSE wakes them. ifidx may be
 * ALL_INTERFACES (applies to every active interface and records the state
 * in dhdp->txoff) or a single interface index.
 * NOTE(review): sampled excerpt -- the if (state) / else selectors around
 * the stop/wake calls are missing from view.
 */
2803 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
2805 struct net_device *net;
2806 dhd_info_t *dhd = dhdp->info;
2809 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2813 if (ifidx == ALL_INTERFACES) {
2814 /* Flow control on all active interfaces */
2815 dhdp->txoff = state;
2816 for (i = 0; i < DHD_MAX_IFS; i++) {
2817 if (dhd->iflist[i]) {
2818 net = dhd->iflist[i]->net;
2820 netif_stop_queue(net);
2822 netif_wake_queue(net);
2827 if (dhd->iflist[ifidx]) {
2828 net = dhd->iflist[ifidx]->net;
2830 netif_stop_queue(net);
2832 netif_wake_queue(net);
/* dhd_is_rxthread_enabled() - accessor: TRUE when the dedicated RX
 * thread is in use for this driver instance. */
2840 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
2842 dhd_info_t *dhd = dhdp->info;
2844 return dhd->rxthread_enabled;
2846 #endif /* DHD_WMF */
/*
 * dhd_rx_frame() - deliver a chain of received packets up the Linux stack.
 * Walks up to numpkt packets linked via PKTNEXT. Per packet: validates the
 * interface, drops data frames arriving before netdev registration, handles
 * BT-AMP / WLFC-header-only / L2-filter / WMF special cases, performs
 * AP-isolation intra-BSS forwarding (PCIe full dongle), converts to a native
 * skb, processes ETHER_TYPE_BRCM event packets via dhd_wl_host_event(),
 * updates rx counters, and hands the skb to netif_rx / the RX thread.
 * Finishes by arming RX/ctrl wake-lock timeouts.
 * NOTE(review): sampled/incomplete excerpt -- many braces, continues and
 * netif_rx()/netif_rx_ni() calls are missing from view.
 */
2849 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
2851 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2852 struct sk_buff *skb;
2855 void *data, *pnext = NULL;
2858 wl_event_msg_t event;
2861 void *skbhead = NULL;
2862 void *skbprev = NULL;
2863 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2866 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2868 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2870 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
2871 struct ether_header *eh;
2873 struct dot11_llc_snap_header *lsh;
/* Detach this packet from the chain before any handler can free it. */
2876 pnext = PKTNEXT(dhdp->osh, pktbuf);
2877 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
2879 ifp = dhd->iflist[ifidx];
2881 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
2883 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2887 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2889 /* Dropping only data packets before registering net device to avoid kernel panic */
2890 #ifndef PROP_TXSTATUS_VSDB
2891 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
2892 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2894 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
2895 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2896 #endif /* PROP_TXSTATUS_VSDB */
2898 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
2900 PKTCFREE(dhdp->osh, pktbuf, FALSE);
/* BT-AMP (WLBTAMP): detect an 802.3+LLC/SNAP encapsulated L2CAP frame. */
2905 lsh = (struct dot11_llc_snap_header *)&eh[1];
2907 if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
2908 (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
2909 bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
2910 lsh->type == HTON16(BTA_PROT_L2CAP)) {
2911 amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
2912 ((uint8 *)eh + RFC1042_HDR_LEN);
2915 #endif /* WLBTAMP */
2917 #ifdef PROP_TXSTATUS
2918 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
2919 /* WLFC may send header only packet when
2920 there is an urgent message but no packet to
2923 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2927 #ifdef DHD_L2_FILTER
2928 /* If block_ping is enabled drop the ping packet */
2929 if (dhdp->block_ping) {
2930 if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
2931 PKTFREE(dhdp->osh, pktbuf, FALSE);
2937 /* WMF processing for multicast packets */
2938 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
2942 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
2943 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
2946 /* The packet is taken by WMF. Continue to next iteration */
2949 /* Packet DROP decision by WMF. Toss it */
2950 DHD_ERROR(("%s: WMF decides to drop packet\n",
2952 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2955 /* Continue the transmit path */
2959 #endif /* DHD_WMF */
2960 #ifdef DHDTCPACK_SUPPRESS
2961 dhd_tcpdata_info_get(dhdp, pktbuf);
2963 skb = PKTTONATIVE(dhdp->osh, pktbuf);
/* Fall back to iflist[0] -- presumably when iflist[ifidx] is NULL;
 * the selecting condition is missing from this sampled view. */
2965 ifp = dhd->iflist[ifidx];
2967 ifp = dhd->iflist[0];
2970 skb->dev = ifp->net;
2972 #ifdef PCIE_FULL_DONGLE
/* Intra-BSS forwarding for AP/P2P-GO when ap_isolate is off: unicast to a
 * known STA is looped back down; multicast is duplicated down AND up. */
2973 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
2974 (!ifp->ap_isolate)) {
2975 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2976 if (ETHER_ISUCAST(eh->ether_dhost)) {
2977 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
2978 dhd_sendpkt(dhdp, ifidx, pktbuf);
2982 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
2983 dhd_sendpkt(dhdp, ifidx, npktbuf);
2986 #endif /* PCIE_FULL_DONGLE */
2988 /* Get the protocol, maintain skb around eth_type_trans()
2989 * The main reason for this hack is for the limitation of
2990 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
2991 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
2992 * coping of the packet coming from the network stack to add
2993 * BDC, Hardware header etc, during network interface registration
2994 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
2995 * for BDC, Hardware header etc. and not just the ETH_HLEN
3000 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3001 dump_data = skb->data;
3002 protocol = (dump_data[12] << 8) | dump_data[13];
3004 if (protocol == ETHER_TYPE_802_1X) {
3005 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3006 "ver %d, type %d, replay %d\n",
3007 dump_data[14], dump_data[15],
3010 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3011 #if defined(DHD_RX_DUMP)
3012 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
3013 if (protocol != ETHER_TYPE_BRCM) {
3014 if (dump_data[0] == 0xFF) {
3015 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
3017 if ((dump_data[12] == 8) &&
3018 (dump_data[13] == 6)) {
3019 DHD_ERROR(("%s: ARP %d\n",
3020 __FUNCTION__, dump_data[0x15]));
3022 } else if (dump_data[0] & 1) {
3023 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
3024 __FUNCTION__, MAC2STRDBG(dump_data)));
3026 #ifdef DHD_RX_FULL_DUMP
3029 for (k = 0; k < skb->len; k++) {
3030 DHD_ERROR(("%02X ", dump_data[k]));
3036 #endif /* DHD_RX_FULL_DUMP */
3038 #endif /* DHD_RX_DUMP */
3040 skb->protocol = eth_type_trans(skb, skb->dev);
3042 if (skb->pkt_type == PACKET_MULTICAST) {
3043 dhd->pub.rx_multicast++;
3044 ifp->stats.multicast++;
3051 dhd_htsf_addrxts(dhdp, pktbuf);
3053 /* Strip header, count, deliver upward */
3054 skb_pull(skb, ETH_HLEN);
3056 /* Process special event packets and then discard them */
3057 memset(&event, 0, sizeof(event));
3058 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
3059 dhd_wl_host_event(dhd, &ifidx,
3060 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3061 skb_mac_header(skb),
3064 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3068 wl_event_to_host_order(&event);
3070 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
3072 if (event.event_type == WLC_E_BTA_HCI_EVENT) {
3073 dhd_bta_doevt(dhdp, data, event.datalen);
3075 #endif /* WLBTAMP */
3077 #if defined(PNO_SUPPORT)
3078 if (event.event_type == WLC_E_PFN_NET_FOUND) {
3079 /* enforce custom wake lock to garantee that Kernel not suspended */
3080 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
3082 #endif /* PNO_SUPPORT */
3084 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3085 PKTFREE(dhdp->osh, pktbuf, FALSE);
3087 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3089 tout_rx = DHD_PACKET_TIMEOUT_MS;
3091 #ifdef PROP_TXSTATUS
3092 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
3093 #endif /* PROP_TXSTATUS */
/* ifidx may have been remapped by dhd_wl_host_event() above. */
3096 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
3097 ifp = dhd->iflist[ifidx];
3100 ifp->net->last_rx = jiffies;
3102 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
3103 dhdp->dstats.rx_bytes += skb->len;
3104 dhdp->rx_packets++; /* Local count */
3105 ifp->stats.rx_bytes += skb->len;
3106 ifp->stats.rx_packets++;
3108 #if defined(DHD_TCP_WINSIZE_ADJUST)
3109 if (dhd_use_tcp_window_size_adjust) {
3110 if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
3111 dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
3114 #endif /* DHD_TCP_WINSIZE_ADJUST */
/* In IRQ context: either queue to the RX thread (chained via PKTSETNEXT)
 * or hand to netif_rx; the actual calls are missing from this view. */
3116 if (in_interrupt()) {
3119 if (dhd->rxthread_enabled) {
3123 PKTSETNEXT(dhdp->osh, skbprev, skb);
3127 /* If the receive is not processed inside an ISR,
3128 * the softirqd must be woken explicitly to service
3129 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3130 * by netif_rx_ni(), but in earlier kernels, we need
3131 * to do it manually.
3133 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3138 local_irq_save(flags);
3140 local_irq_restore(flags);
3141 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3146 if (dhd->rxthread_enabled && skbhead)
3147 dhd_sched_rxf(dhdp, skbhead);
/* Hold wake locks long enough for userspace to consume the data/events. */
3149 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
3150 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
/* dhd_event() - OS hook invoked by the common layer on firmware events;
 * intentionally a no-op on Linux. */
3154 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
3156 /* Linux version has nothing to do */
/*
 * dhd_txcomplete() - per-packet TX completion callback from the bus layer.
 * Pulls the protocol header back off, decrements the pending-802.1X counter
 * for EAPOL frames, generates a local BT-AMP HCI ACL completion event when
 * applicable, and (under proptxstatus) accounts tx_packets/tx_bytes or
 * tx_dropped depending on @success.
 * NOTE(review): sampled excerpt -- locals (type, len) and braces missing.
 */
3161 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
3163 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3164 struct ether_header *eh;
3170 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
3172 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
3173 type = ntoh16(eh->ether_type);
3175 if (type == ETHER_TYPE_802_1X)
3176 atomic_dec(&dhd->pend_8021x_cnt);
3179 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
3180 * If yes generate packet completion event.
3182 len = PKTLEN(dhdp->osh, txp);
3184 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
3185 if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
3186 struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
3188 if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
3189 ntoh16(lsh->type) == BTA_PROT_L2CAP) {
3191 dhd_bta_tx_hcidata_complete(dhdp, txp, success);
3194 #endif /* WLBTAMP */
3195 #ifdef PROP_TXSTATUS
3196 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
3197 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
3198 uint datalen = PKTLEN(dhd->pub.osh, txp);
/* success branch (the if (success) selector is missing from view): */
3201 dhd->pub.tx_packets++;
3202 ifp->stats.tx_packets++;
3203 ifp->stats.tx_bytes += datalen;
3205 ifp->stats.tx_dropped++;
/*
 * dhd_get_stats() - ndo_get_stats hook: return per-interface statistics.
 * Maps the net_device back to its DHD interface; on a bad index returns
 * zeroed stats. Also kicks off a dongle stats refresh via dhd_prot_dstats().
 * NOTE(review): sampled excerpt -- return statements are missing from view.
 */
3211 static struct net_device_stats *
3212 dhd_get_stats(struct net_device *net)
3214 dhd_info_t *dhd = DHD_DEV_INFO(net);
3218 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3220 ifidx = dhd_net2idx(dhd, net);
3221 if (ifidx == DHD_BAD_IF) {
3222 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
3224 memset(&net->stats, 0, sizeof(net->stats));
3228 ifp = dhd->iflist[ifidx];
3232 /* Use the protocol to get dongle stats */
3233 dhd_prot_dstats(&dhd->pub);
/*
 * dhd_watchdog_thread() - dedicated watchdog kthread.
 * Optionally raises itself to SCHED_FIFO, then loops on tsk->sema: each
 * wakeup (unless terminated) runs the bus watchdog and re-arms dhd->timer,
 * compensating the next interval by the time the tick itself consumed.
 * Exits via complete_and_exit() when tsk->terminated is set.
 * NOTE(review): sampled excerpt -- loop construct and some braces missing.
 */
3239 dhd_watchdog_thread(void *data)
3241 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3242 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3243 /* This thread doesn't need any user-level access,
3244 * so get rid of all our resources
3246 if (dhd_watchdog_prio > 0) {
3247 struct sched_param param;
3248 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
3249 dhd_watchdog_prio:(MAX_RT_PRIO-1);
/* FIX(review): "&param" had been mojibake-corrupted to a pilcrow ("¶m"),
 * i.e. an HTML-entity round-trip of "&para"; restored the address-of. */
3250 setScheduler(current, SCHED_FIFO, &param);
3254 if (down_interruptible (&tsk->sema) == 0) {
3255 unsigned long flags;
3256 unsigned long jiffies_at_start = jiffies;
3257 unsigned long time_lapse;
3259 SMP_RD_BARRIER_DEPENDS();
3260 if (tsk->terminated) {
3264 if (dhd->pub.dongle_reset == FALSE) {
3265 DHD_TIMER(("%s:\n", __FUNCTION__));
3267 /* Call the bus module watchdog */
3268 dhd_bus_watchdog(&dhd->pub);
3271 DHD_GENERAL_LOCK(&dhd->pub, flags);
3272 /* Count the tick for reference */
3274 time_lapse = jiffies - jiffies_at_start;
3276 /* Reschedule the watchdog */
3277 if (dhd->wd_timer_valid)
3278 mod_timer(&dhd->timer,
/* next expiry = period minus time already spent in this tick (clamped) */
3280 msecs_to_jiffies(dhd_watchdog_ms) -
3281 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
3282 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3288 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_watchdog() - kernel timer callback for the DHD watchdog.
 * If a dedicated watchdog thread exists (thr_pid >= 0) it just ups the
 * thread's semaphore; otherwise it runs the bus watchdog inline and
 * re-arms the timer itself. No-op while the dongle is in reset.
 */
3291 static void dhd_watchdog(ulong data)
3293 dhd_info_t *dhd = (dhd_info_t *)data;
3294 unsigned long flags;
3296 if (dhd->pub.dongle_reset) {
3300 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
/* Defer the actual work to dhd_watchdog_thread(). */
3301 up(&dhd->thr_wdt_ctl.sema);
3305 /* Call the bus module watchdog */
3306 dhd_bus_watchdog(&dhd->pub);
3308 DHD_GENERAL_LOCK(&dhd->pub, flags);
3309 /* Count the tick for reference */
3312 /* Reschedule the watchdog */
3313 if (dhd->wd_timer_valid)
3314 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
3315 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3319 #ifdef ENABLE_ADAPTIVE_SCHED
/*
 * dhd_sched_policy() - adapt the calling thread's scheduling class to CPU
 * frequency: at/below CUSTOM_CPUFREQ_THRESH on CPU0 demote to SCHED_NORMAL,
 * otherwise (if not already FIFO) promote to SCHED_FIFO at @prio clamped
 * below MAX_RT_PRIO.
 */
3321 dhd_sched_policy(int prio)
3323 struct sched_param param;
3324 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
3325 param.sched_priority = 0;
/* FIX(review): "&param" had been mojibake-corrupted to "¶m" (stray
 * pilcrow from an HTML-entity round-trip); restored on both calls. */
3326 setScheduler(current, SCHED_NORMAL, &param);
3328 if (get_scheduler_policy(current) != SCHED_FIFO) {
3329 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
3330 setScheduler(current, SCHED_FIFO, &param);
3334 #endif /* ENABLE_ADAPTIVE_SCHED */
3335 #ifdef DEBUG_CPU_FREQ
/*
 * dhd_cpufreq_notifier() - cpufreq transition notifier (DEBUG_CPU_FREQ).
 * On CPUFREQ_POSTCHANGE, logs the new frequency and records it in the
 * per-cpu dhd->new_freq slot for the affected CPU.
 */
3336 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3338 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3339 struct cpufreq_freqs *freq = data;
3343 if (val == CPUFREQ_POSTCHANGE) {
3344 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3345 freq->new, freq->cpu));
3346 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3352 #endif /* DEBUG_CPU_FREQ */
/*
 * dhd_dpc_thread() - dedicated DPC (deferred procedure call) kthread.
 * Optionally raises itself to SCHED_FIFO and pins to a configured CPU core,
 * then loops on a binary semaphore: each wakeup drains bus work via
 * dhd_bus_dpc() (extending the watchdog while busy) or, if the bus is down,
 * performs a clean dhd_bus_stop(). Drops the OS wake lock when idle.
 * NOTE(review): sampled excerpt -- loop construct and some braces missing.
 */
3354 dhd_dpc_thread(void *data)
3356 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3357 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3359 /* This thread doesn't need any user-level access,
3360 * so get rid of all our resources
3362 if (dhd_dpc_prio > 0)
3364 struct sched_param param;
3365 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
/* FIX(review): restored "&param" (was mojibake "¶m"). */
3366 setScheduler(current, SCHED_FIFO, &param);
3369 #ifdef CUSTOM_DPC_CPUCORE
3370 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
3372 if (dhd->pub.conf->dpc_cpucore >= 0) {
3373 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
3374 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
3377 #ifdef CUSTOM_SET_CPUCORE
3378 dhd->pub.current_dpc = current;
3379 #endif /* CUSTOM_SET_CPUCORE */
3380 /* Run until signal received */
3382 if (!binary_sema_down(tsk)) {
3383 #ifdef ENABLE_ADAPTIVE_SCHED
3384 dhd_sched_policy(dhd_dpc_prio);
3385 #endif /* ENABLE_ADAPTIVE_SCHED */
3386 SMP_RD_BARRIER_DEPENDS();
3387 if (tsk->terminated) {
3391 /* Call bus dpc unless it indicated down (then clean stop) */
3392 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3393 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
3394 while (dhd_bus_dpc(dhd->pub.bus)) {
3395 /* process all data */
3397 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
3398 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3402 dhd_bus_stop(dhd->pub.bus, TRUE);
3403 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3409 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_rxf_thread() - RX forwarding kthread.
 * Optionally raises itself to SCHED_FIFO and pins to a configured core,
 * signals startup completion, then loops on tsk->sema: each wakeup drains
 * the RX queue via dhd_rxf_dequeue(), unlinking each skb before pushing it
 * up the stack. With WAIT_DEQUEUE, also yields periodically (watchdog-style
 * timestamp check). Drops the wake lock when the queue is empty.
 * NOTE(review): sampled excerpt -- loop construct, netif_rx calls and some
 * braces are missing from view.
 */
3413 dhd_rxf_thread(void *data)
3415 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3416 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3417 #if defined(WAIT_DEQUEUE)
3418 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
3419 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
3421 dhd_pub_t *pub = &dhd->pub;
3423 /* This thread doesn't need any user-level access,
3424 * so get rid of all our resources
3426 if (dhd_rxf_prio > 0)
3428 struct sched_param param;
3429 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
/* FIX(review): restored "&param" (was mojibake "¶m"). */
3430 setScheduler(current, SCHED_FIFO, &param);
3433 DAEMONIZE("dhd_rxf");
3434 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
3436 /* signal: thread has started */
3437 complete(&tsk->completed);
3438 #ifdef CUSTOM_SET_CPUCORE
3439 dhd->pub.current_rxf = current;
3440 #endif /* CUSTOM_SET_CPUCORE */
3441 /* Run until signal received */
3443 if (down_interruptible(&tsk->sema) == 0) {
3445 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
3448 #ifdef ENABLE_ADAPTIVE_SCHED
3449 dhd_sched_policy(dhd_rxf_prio);
3450 #endif /* ENABLE_ADAPTIVE_SCHED */
3452 SMP_RD_BARRIER_DEPENDS();
3454 if (tsk->terminated) {
3457 skb = dhd_rxf_dequeue(pub);
/* Unlink before delivery so the stack never sees our chain links. */
3463 void *skbnext = PKTNEXT(pub->osh, skb);
3464 PKTSETNEXT(pub->osh, skb, NULL);
3466 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3470 local_irq_save(flags);
3472 local_irq_restore(flags);
3477 #if defined(WAIT_DEQUEUE)
3478 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
3480 watchdogTime = OSL_SYSUPTIME();
3484 DHD_OS_WAKE_UNLOCK(pub);
3489 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_dpc_kill() - stop DPC processing by killing the DPC tasklet.
 * NOTE(review): sampled excerpt -- guard checks between the prototype and
 * the tasklet_kill() call are missing from view.
 */
3493 void dhd_dpc_kill(dhd_pub_t *dhdp)
3505 tasklet_kill(&dhd->tasklet);
3506 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
3508 #endif /* BCMPCIE */
/*
 * dhd_dpc tasklet body (function header not visible in this sampled view).
 * Mirrors dhd_dpc_thread(): runs dhd_bus_dpc() and reschedules itself while
 * there is more work; releases the wake lock when done or when performing
 * a clean bus stop on a downed bus.
 */
3515 dhd = (dhd_info_t *)data;
3517 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3518 * down below , wake lock is set,
3519 * the tasklet is initialized in dhd_attach()
3521 /* Call bus dpc unless it indicated down (then clean stop) */
3522 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3523 if (dhd_bus_dpc(dhd->pub.bus))
3524 tasklet_schedule(&dhd->tasklet);
3526 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3528 dhd_bus_stop(dhd->pub.bus, TRUE);
3529 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * dhd_sched_dpc() - schedule deferred bus processing.
 * Takes the OS wake lock, then either wakes the DPC thread via its binary
 * semaphore (releasing the lock again if the semaphore was already up) or,
 * when no thread exists, schedules the DPC tasklet.
 */
3534 dhd_sched_dpc(dhd_pub_t *dhdp)
3536 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3538 DHD_OS_WAKE_LOCK(dhdp);
3539 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
3540 /* If the semaphore does not get up,
3541 * wake unlock should be done here
3543 if (!binary_sema_up(&dhd->thr_dpc_ctl))
3544 DHD_OS_WAKE_UNLOCK(dhdp);
3547 tasklet_schedule(&dhd->tasklet);
/*
 * dhd_sched_rxf() - hand an skb (chain) to the RX forwarding thread.
 * Takes the wake lock, enqueues via dhd_rxf_enqueue(), and ups the RX
 * thread's semaphore. With RXF_DEQUEUE_ON_BUSY it retries a busy queue
 * (sleeping 50 ms between attempts) and, if still busy, pushes the chain
 * to the kernel backlog directly as a fallback.
 * NOTE(review): sampled excerpt -- do/while opener, netif_rx calls and
 * some braces are missing from view.
 */
3552 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
3554 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3555 #ifdef RXF_DEQUEUE_ON_BUSY
3558 #endif /* RXF_DEQUEUE_ON_BUSY */
3560 DHD_OS_WAKE_LOCK(dhdp);
3562 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
3563 #ifdef RXF_DEQUEUE_ON_BUSY
3565 ret = dhd_rxf_enqueue(dhdp, skb);
3566 if (ret == BCME_OK || ret == BCME_ERROR)
3569 OSL_SLEEP(50); /* waiting for dequeueing */
3570 } while (retry-- > 0);
3572 if (retry <= 0 && ret == BCME_BUSY) {
3576 void *skbnext = PKTNEXT(dhdp->osh, skbp);
3577 PKTSETNEXT(dhdp->osh, skbp, NULL);
3581 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
3584 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3585 up(&dhd->thr_rxf_ctl.sema);
3588 #else /* RXF_DEQUEUE_ON_BUSY */
3590 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
3593 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3594 up(&dhd->thr_rxf_ctl.sema);
3597 #endif /* RXF_DEQUEUE_ON_BUSY */
3601 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
/*
 * dhd_toe_get() - read the "toe_ol" iovar (TCP offload-engine component
 * bitmap) from the dongle into *toe_ol via a WLC_GET_VAR ioctl.
 * On ioctl failure, distinguishes "not supported by this dongle image"
 * from other errors before returning the error code.
 * NOTE(review): sampled excerpt -- ioc/buf declarations and returns are
 * missing from view.
 */
3603 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
3609 memset(&ioc, 0, sizeof(ioc));
3611 ioc.cmd = WLC_GET_VAR;
3613 ioc.len = (uint)sizeof(buf);
3616 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3617 buf[sizeof(buf) - 1] = '\0';
3618 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3619 /* Check for older dongle image that doesn't support toe_ol */
3621 DHD_ERROR(("%s: toe not supported by device\n",
3622 dhd_ifname(&dhd->pub, ifidx)));
3626 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3630 memcpy(toe_ol, buf, sizeof(uint32));
3634 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
/*
 * dhd_toe_set() - write the "toe_ol" component bitmap and then the global
 * "toe" enable (non-zero iff any component bit is set) to the dongle via
 * WLC_SET_VAR ioctls. Returns the first ioctl error encountered.
 * NOTE(review): sampled excerpt -- declarations and returns are missing.
 */
3636 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
3642 memset(&ioc, 0, sizeof(ioc));
3644 ioc.cmd = WLC_SET_VAR;
3646 ioc.len = (uint)sizeof(buf);
3649 /* Set toe_ol as requested */
3651 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3652 buf[sizeof(buf) - 1] = '\0';
/* Value follows the NUL-terminated iovar name in the buffer. */
3653 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
3655 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3656 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3657 dhd_ifname(&dhd->pub, ifidx), ret));
3661 /* Enable toe globally only if any components are enabled. */
3663 toe = (toe_ol != 0);
3666 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
3668 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3669 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3677 #if defined(WL_CFG80211)
/*
 * dhd_set_scb_probe() - raise the per-STA probe count ("scb_probe" iovar)
 * to NUM_SCB_MAX_PROBE (3). Reads the current scb_probe structure from the
 * dongle, patches scb_max_probe, and writes it back. Skipped (early return,
 * presumably -- the statement is missing from view) in HOSTAP mode.
 */
3678 void dhd_set_scb_probe(dhd_pub_t *dhd)
3680 #define NUM_SCB_MAX_PROBE 3
3682 wl_scb_probe_t scb_probe;
3683 char iovbuf[WL_EVENTING_MASK_LEN + 12];
3685 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
3687 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
3690 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
3692 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3693 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
3695 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
3697 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
3699 bcm_mkiovar("scb_probe", (char *)&scb_probe,
3700 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
3701 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3702 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
3703 #undef NUM_SCB_MAX_PROBE
3706 #endif /* WL_CFG80211 */
3708 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/* dhd_ethtool_get_drvinfo() - ethtool .get_drvinfo: report driver name
 * "wl" and the numeric driver version. */
3710 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
3712 dhd_info_t *dhd = DHD_DEV_INFO(net);
3714 snprintf(info->driver, sizeof(info->driver), "wl");
3715 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
/* ethtool ops table registered on the net_device; only drvinfo is wired. */
3718 struct ethtool_ops dhd_ethtool_ops = {
3719 .get_drvinfo = dhd_ethtool_get_drvinfo
3721 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
3724 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * dhd_ethtool() - legacy ethtool ioctl handler (pre-ethtool_ops path).
 * Copies the command word from userspace and dispatches:
 *   ETHTOOL_GDRVINFO          - report driver name/version ("dhd"/"wl"/"xx")
 *   ETHTOOL_GRXCSUM/GTXCSUM   - read TOE checksum-offload enables
 *   ETHTOOL_SRXCSUM/STXCSUM   - write TOE enables; mirrors the TX setting
 *                               into net->features (NETIF_F_IP_CSUM)
 * Returns 0 or a negative errno/BCME code (returns missing from this view).
 * NOTE(review): sampled/incomplete excerpt -- case breaks, -EFAULT returns
 * and closing braces are not visible.
 */
3726 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
3728 struct ethtool_drvinfo info;
3729 char drvname[sizeof(info.driver)];
3732 struct ethtool_value edata;
3733 uint32 toe_cmpnt, csum_dir;
3737 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3739 /* all ethtool calls start with a cmd word */
3740 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
3744 case ETHTOOL_GDRVINFO:
3745 /* Copy out any request driver name */
3746 if (copy_from_user(&info, uaddr, sizeof(info)))
3748 strncpy(drvname, info.driver, sizeof(info.driver));
3749 drvname[sizeof(info.driver)-1] = '\0';
3751 /* clear struct for return */
3752 memset(&info, 0, sizeof(info));
3755 /* if dhd requested, identify ourselves */
3756 if (strcmp(drvname, "?dhd") == 0) {
3757 snprintf(info.driver, sizeof(info.driver), "dhd");
3758 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
3759 info.version[sizeof(info.version) - 1] = '\0';
3762 /* otherwise, require dongle to be up */
3763 else if (!dhd->pub.up) {
3764 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
3768 /* finally, report dongle driver type */
3769 else if (dhd->pub.iswl)
3770 snprintf(info.driver, sizeof(info.driver), "wl");
3772 snprintf(info.driver, sizeof(info.driver), "xx");
3774 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
3775 if (copy_to_user(uaddr, &info, sizeof(info)))
3777 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
3778 (int)sizeof(drvname), drvname, info.driver));
3782 /* Get toe offload components from dongle */
3783 case ETHTOOL_GRXCSUM:
3784 case ETHTOOL_GTXCSUM:
3785 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3788 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3791 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
3793 if (copy_to_user(uaddr, &edata, sizeof(edata)))
3797 /* Set toe offload components in dongle */
3798 case ETHTOOL_SRXCSUM:
3799 case ETHTOOL_STXCSUM:
3800 if (copy_from_user(&edata, uaddr, sizeof(edata)))
3803 /* Read the current settings, update and write back */
3804 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3807 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3809 if (edata.data != 0)
3810 toe_cmpnt |= csum_dir;
3812 toe_cmpnt &= ~csum_dir;
3814 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
3817 /* If setting TX checksum mode, tell Linux the new mode */
3818 if (cmd == ETHTOOL_STXCSUM) {
3820 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
3822 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
3834 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/*
 * dhd_check_hang() - decide whether an ioctl/bus error means the dongle
 * hung; if so, send the HANG event to userspace.
 * Triggers on -ETIMEDOUT (gated by check_rev() on UNIVERSAL5433 boards),
 * -EREMOTEIO, or a downed bus that was not deliberately reset. Skipped when
 * the DPC thread is already gone (driver unloading). Return statements are
 * missing from this sampled view.
 */
3836 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
3841 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3848 dhd = (dhd_info_t *)dhdp->info;
3849 #if !defined(BCMPCIE)
3850 if (dhd->thr_dpc_ctl.thr_pid < 0) {
3851 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
3856 #ifdef CONFIG_MACH_UNIVERSAL5433
3857 /* old revision does not send hang message */
3858 if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
3860 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
3861 #endif /* CONFIG_MACH_UNIVERSAL5433 */
3862 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
3863 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
3864 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
3865 net_os_send_hang_message(net);
/*
 * dhd_ioctl_process: dispatch a validated wl/dhd ioctl for interface ifidx.
 * DHD-private ioctls (DHD_IOCTL_MAGIC) are handled locally; everything else
 * is forwarded to the dongle, which must be up and in DHD_BUS_DATA state.
 * Returns a BCME_* error code.  NOTE(review): capture is elided; some
 * original lines (locals, gotos, closing braces) are not shown.
 */
3871 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
3873 int bcmerror = BCME_OK;
3875 struct net_device *net;
3877 net = dhd_idx2net(pub, ifidx);
3879 bcmerror = BCME_BADARG;
/* Clamp the buffer length the dongle may touch. */
3884 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
3886 /* check for local dhd ioctl and handle it */
3887 if (ioc->driver == DHD_IOCTL_MAGIC) {
3888 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
3890 pub->bcmerror = bcmerror;
3894 /* send to dongle (must be up, and wl). */
3895 if (pub->busstate != DHD_BUS_DATA) {
3896 bcmerror = BCME_DONGLE_DOWN;
3901 bcmerror = BCME_DONGLE_DOWN;
3906 * Flush the TX queue if required for proper message serialization:
3907 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
3908 * prevent M4 encryption and
3909 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
3910 * prevent disassoc frame being sent before WPS-DONE frame.
/* strncmp length 9 includes the NUL of "wsec_key" so only the exact iovar
 * name matches, not prefixes such as "wsec_key_x". */
3912 if (ioc->cmd == WLC_SET_KEY ||
3913 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3914 strncmp("wsec_key", data_buf, 9) == 0) ||
3915 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3916 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
3917 ioc->cmd == WLC_DISASSOC)
/* Wait for pending 802.1X (EAPOL) frames to drain before key/disassoc. */
3918 dhd_wait_pend8021x(net);
3922 /* short cut wl ioctl calls here */
/* NOTE(review): unlike the checks above, these HTSF strcmp calls do not
 * guard against data_buf == NULL — confirm against full source. */
3923 if (strcmp("htsf", data_buf) == 0) {
3924 dhd_ioctl_htsf_get(dhd, 0);
3928 if (strcmp("htsflate", data_buf) == 0) {
3930 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
3931 memset(&maxdelayts, 0, sizeof(tstamp_t));
3935 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3936 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3937 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3938 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3944 if (strcmp("htsfclear", data_buf) == 0) {
3945 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3946 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3947 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3948 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3952 if (strcmp("htsfhis", data_buf) == 0) {
3953 dhd_dump_htsfhisto(&vi_d1, "H to D");
3954 dhd_dump_htsfhisto(&vi_d2, "D to D");
3955 dhd_dump_htsfhisto(&vi_d3, "D to H");
3956 dhd_dump_htsfhisto(&vi_d4, "H to H");
3959 if (strcmp("tsport", data_buf) == 0) {
3961 memcpy(&tsport, data_buf + 7, 4);
3963 DHD_ERROR(("current timestamp port: %d \n", tsport));
3968 #endif /* WLMEDIA_HTSF */
/* "rpc_" iovars are only meaningful for full-dongle aggregation builds. */
3970 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
3971 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
3973 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
3975 bcmerror = BCME_UNSUPPORTED;
/* Default path: forward the ioctl to the dongle firmware. */
3979 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
/* Escalate to a HANG event if the error indicates a dead dongle. */
3982 dhd_check_hang(net, pub, bcmerror);
/*
 * dhd_ioctl_entry: net_device ioctl handler (.ndo_do_ioctl).  Routes wireless
 * extensions, ethtool, and Android private commands, then copies a
 * SIOCDEVPRIVATE wl_ioctl_t request from user space, stages its payload in a
 * kernel buffer and hands it to dhd_ioctl_process().  Returns an OSL-mapped
 * errno.  NOTE(review): capture is elided; some exit paths / braces missing.
 */
3988 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
3990 dhd_info_t *dhd = DHD_DEV_INFO(net);
3995 void *local_buf = NULL;
/* Hold the wake lock and perimeter lock for the duration of the ioctl. */
3998 DHD_OS_WAKE_LOCK(&dhd->pub);
3999 DHD_PERIM_LOCK(&dhd->pub);
4001 /* Interface up check for built-in type */
4002 if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
4003 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
4004 DHD_PERIM_UNLOCK(&dhd->pub);
4005 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4009 /* send to dongle only if we are not waiting for reload already */
4010 if (dhd->pub.hang_was_sent) {
4011 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
4012 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
/* NOTE(review): this exit drops the wake lock but no DHD_PERIM_UNLOCK is
 * visible before the return — confirm against full source. */
4013 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4014 return OSL_ERROR(BCME_DONGLE_DOWN);
4017 ifidx = dhd_net2idx(dhd, net);
4018 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
4020 if (ifidx == DHD_BAD_IF) {
4021 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
4022 DHD_PERIM_UNLOCK(&dhd->pub);
4023 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4027 #if defined(WL_WIRELESS_EXT)
4028 /* linux wireless extensions */
4029 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
4030 /* may recurse, do NOT lock */
4031 ret = wl_iw_ioctl(net, ifr, cmd);
4032 DHD_PERIM_UNLOCK(&dhd->pub);
4033 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4036 #endif /* defined(WL_WIRELESS_EXT) */
4038 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
4039 if (cmd == SIOCETHTOOL) {
4040 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
4041 DHD_PERIM_UNLOCK(&dhd->pub);
4042 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4045 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/* SIOCDEVPRIVATE+1 carries Android "PRIVATE" string commands. */
4047 if (cmd == SIOCDEVPRIVATE+1) {
4048 ret = wl_android_priv_cmd(net, ifr, cmd);
4049 dhd_check_hang(net, &dhd->pub, ret);
/* NOTE(review): no DHD_PERIM_UNLOCK visible on this path either — confirm
 * against full source (line may be elided). */
4050 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Everything else must be the wl_ioctl_t transport on SIOCDEVPRIVATE. */
4054 if (cmd != SIOCDEVPRIVATE) {
4055 DHD_PERIM_UNLOCK(&dhd->pub);
4056 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4060 memset(&ioc, 0, sizeof(ioc));
4062 #ifdef CONFIG_COMPAT
/* 32-bit user space on a 64-bit kernel: translate the compat layout. */
4063 if (is_compat_task()) {
4064 compat_wl_ioctl_t compat_ioc;
4065 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
4066 bcmerror = BCME_BADADDR;
4069 ioc.cmd = compat_ioc.cmd;
4070 ioc.buf = compat_ptr(compat_ioc.buf);
4071 ioc.len = compat_ioc.len;
4072 ioc.set = compat_ioc.set;
4073 ioc.used = compat_ioc.used;
4074 ioc.needed = compat_ioc.needed;
4075 /* To differentiate between wl and dhd read 4 more byes */
4076 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
4077 sizeof(uint)) != 0)) {
4078 bcmerror = BCME_BADADDR;
4082 #endif /* CONFIG_COMPAT */
4084 /* Copy the ioc control structure part of ioctl request */
4085 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
4086 bcmerror = BCME_BADADDR;
4090 /* To differentiate between wl and dhd read 4 more byes */
4091 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
4092 sizeof(uint)) != 0)) {
4093 bcmerror = BCME_BADADDR;
/* Privileged operation: require CAP_NET_ADMIN. */
4098 if (!capable(CAP_NET_ADMIN)) {
4099 bcmerror = BCME_EPERM;
/* Stage the user payload in a kernel buffer, +1 for a forced terminator. */
4104 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
4105 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
4106 bcmerror = BCME_NOMEM;
/* Drop the perimeter lock around copy_from_user: it may sleep on a fault. */
4110 DHD_PERIM_UNLOCK(&dhd->pub);
4111 if (copy_from_user(local_buf, ioc.buf, buflen)) {
4112 DHD_PERIM_LOCK(&dhd->pub);
4113 bcmerror = BCME_BADADDR;
4116 DHD_PERIM_LOCK(&dhd->pub);
/* Guarantee NUL termination so iovar-name string compares are safe. */
4118 *(char *)(local_buf + buflen) = '\0';
4121 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
/* GET-style ioctls: copy the (possibly updated) payload back to user space. */
4123 if (!bcmerror && buflen && local_buf && ioc.buf) {
4124 DHD_PERIM_UNLOCK(&dhd->pub);
4125 if (copy_to_user(ioc.buf, local_buf, buflen))
4127 DHD_PERIM_LOCK(&dhd->pub);
4132 MFREE(dhd->pub.osh, local_buf, buflen+1);
4134 DHD_PERIM_UNLOCK(&dhd->pub);
4135 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Translate the BCME_* result into a negative errno for the kernel. */
4137 return OSL_ERROR(bcmerror);
4140 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * dhd_deepsleep: enable (flag==1) or disable (flag==0) firmware deepsleep.
 * Enabling first turns off MPC (so mpc does not fight deepsleep), then sets
 * the "deepsleep" iovar.  Disabling retries up to MAX_TRY_CNT times, reading
 * the iovar back until it reports 0, then re-enables MPC.
 * NOTE(review): capture is elided; locals, powervar assignments and the
 * retry-loop exit are on lines not shown here.
 */
4141 int dhd_deepsleep(dhd_info_t *dhd, int flag)
4152 case 1 : /* Deepsleep on */
4153 DHD_ERROR(("dhd_deepsleep: ON\n"));
4154 /* give some time to sysioc_work before deepsleep */
4156 #ifdef PKT_FILTER_SUPPORT
4157 /* disable pkt filter */
4158 dhd_enable_packet_filter(0, dhdp);
4159 #endif /* PKT_FILTER_SUPPORT */
/* Turn MPC off before enabling deepsleep. */
4162 memset(iovbuf, 0, sizeof(iovbuf));
4163 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4164 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4166 /* Enable Deepsleep */
4168 memset(iovbuf, 0, sizeof(iovbuf));
4169 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4170 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4173 case 0: /* Deepsleep Off */
4174 DHD_ERROR(("dhd_deepsleep: OFF\n"));
4176 /* Disable Deepsleep */
4177 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
/* Request deepsleep off... */
4179 memset(iovbuf, 0, sizeof(iovbuf));
4180 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4181 iovbuf, sizeof(iovbuf));
4182 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
4183 sizeof(iovbuf), TRUE, 0);
/* ...then read it back to verify the firmware actually left deepsleep. */
4185 memset(iovbuf, 0, sizeof(iovbuf));
4186 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4187 iovbuf, sizeof(iovbuf));
4188 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
4189 sizeof(iovbuf), FALSE, 0)) < 0) {
4190 DHD_ERROR(("the error of dhd deepsleep status"
4191 " ret value :%d\n", ret));
4193 if (!(*(int *)iovbuf)) {
4194 DHD_ERROR(("deepsleep mode is 0,"
4195 " count: %d\n", cnt));
/* Restore MPC once deepsleep is confirmed off. */
4203 memset(iovbuf, 0, sizeof(iovbuf));
4204 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4205 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
/*
 * dhd_stop: net_device .ndo_stop handler ("ifconfig down").  Stops the TX
 * queue, tears down cfg80211 state and any leftover virtual interfaces,
 * cleans up wlfc/protocol state, powers the chip off for non-built-in
 * builds, and clears hang/timeout bookkeeping.  NOTE(review): capture is
 * elided; the early-exit label and final return are on lines not shown.
 */
4213 dhd_stop(struct net_device *net)
4216 dhd_info_t *dhd = DHD_DEV_INFO(net);
4217 DHD_OS_WAKE_LOCK(&dhd->pub);
4218 DHD_PERIM_LOCK(&dhd->pub);
4219 printk("%s: Enter %p\n", __FUNCTION__, net);
/* Nothing to do if the interface never came up. */
4220 if (dhd->pub.up == 0) {
4224 dhd_if_flush_sta(DHD_DEV_IFP(net));
4227 ifidx = dhd_net2idx(dhd, net);
4228 BCM_REFERENCE(ifidx);
4230 /* Set state and stop OS transmissions */
4231 netif_stop_queue(net);
4236 wl_cfg80211_down(NULL);
4239 * For CFG80211: Clean up all the left over virtual interfaces
4240 * when the primary Interface is brought down. [ifconfig wlan0 down]
4242 if (!dhd_download_fw_on_driverload) {
4243 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
4244 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
/* Remove every secondary interface (index 0 is the primary). */
4247 dhd_net_if_lock_local(dhd);
4248 for (i = 1; i < DHD_MAX_IFS; i++)
4249 dhd_remove_if(&dhd->pub, i, FALSE);
4250 dhd_net_if_unlock_local(dhd);
4254 #endif /* WL_CFG80211 */
4256 #ifdef PROP_TXSTATUS
4257 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
4259 /* Stop the protocol module */
4260 dhd_prot_stop(&dhd->pub);
4262 OLD_MOD_DEC_USE_COUNT;
/* For dynamic-load builds, power the WLAN chip off with the primary IF. */
4264 if (ifidx == 0 && !dhd_download_fw_on_driverload)
4265 wl_android_wifi_off(net);
4267 if (dhd->pub.conf->deepsleep)
4268 dhd_deepsleep(dhd, 1);
/* Reset hang-detection counters so a future open starts clean. */
4270 dhd->pub.rxcnt_timeout = 0;
4271 dhd->pub.txcnt_timeout = 0;
4273 dhd->pub.hang_was_sent = 0;
4275 /* Clear country spec for for built-in type driver */
4276 if (!dhd_download_fw_on_driverload) {
4277 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
4278 dhd->pub.dhd_cspec.rev = 0;
4279 dhd->pub.dhd_cspec.ccode[0] = 0x00;
4282 printk("%s: Exit\n", __FUNCTION__);
4283 DHD_PERIM_UNLOCK(&dhd->pub);
4284 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4288 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
4289 extern bool g_first_broadcast_scan;
/*
 * dhd_interworking_enable: turn on 802.11u interworking in the firmware and,
 * on success, set the basic WNM capabilities needed for Hotspot 2.0 R2
 * (BSS transition + notification).  Returns a BCME_* code.
 * NOTE(review): the log text "enableing" is a typo in a runtime string;
 * left untouched here since this edit changes comments only.
 */
4293 static int dhd_interworking_enable(dhd_pub_t *dhd)
4295 char iovbuf[WLC_IOCTL_SMLEN];
4296 uint32 enable = true;
4299 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
4300 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4301 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
4304 if (ret == BCME_OK) {
4305 /* basic capabilities for HS20 REL2 */
4306 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
4307 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
4308 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
4309 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4310 DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
/*
 * dhd_open: net_device .ndo_open handler ("ifconfig up").  Powers the chip
 * on for dynamic-load builds, brings up the bus, syncs the MAC address,
 * restores TOE/csum offload state, brings up cfg80211, and finally starts
 * the TX queue.  NOTE(review): capture is elided; several early-exit gotos,
 * the exit label body, and some #else branches are on lines not shown.
 */
4319 dhd_open(struct net_device *net)
4321 dhd_info_t *dhd = DHD_DEV_INFO(net);
4328 printk("%s: Enter %p\n", __FUNCTION__, net);
4329 #if defined(MULTIPLE_SUPPLICANT)
4330 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
/* Serialize open against a still-running insmod/probe. */
4331 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4332 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
4334 mutex_lock(&_dhd_sdio_mutex_lock_);
4336 #endif /* MULTIPLE_SUPPLICANT */
4338 DHD_OS_WAKE_LOCK(&dhd->pub);
4339 DHD_PERIM_LOCK(&dhd->pub);
/* A fresh open clears any previous trap/hang state. */
4340 dhd->pub.dongle_trap_occured = 0;
4341 dhd->pub.hang_was_sent = 0;
4345 * Force start if ifconfig_up gets called before START command
4346 * We keep WEXT's wl_control_wl_start to provide backward compatibility
4347 * This should be removed in the future
4349 ret = wl_control_wl_start(net);
4351 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4357 ifidx = dhd_net2idx(dhd, net);
4358 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
4361 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
4366 if (!dhd->iflist[ifidx]) {
4367 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
/* Primary-interface-only bring-up work follows (ifidx == 0 path). */
4373 atomic_set(&dhd->pend_8021x_cnt, 0);
4374 if (!dhd_download_fw_on_driverload) {
4375 DHD_ERROR(("\n%s\n", dhd_version));
4376 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
4377 g_first_broadcast_scan = TRUE;
/* Dynamic-load build: power on the WLAN chip (downloads FW as well). */
4379 ret = wl_android_wifi_on(net);
4381 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
4382 __FUNCTION__, ret));
4388 if (dhd->pub.busstate != DHD_BUS_DATA) {
4390 /* try to bring up bus */
/* Perim lock is dropped across dhd_bus_start since it can sleep. */
4391 DHD_PERIM_UNLOCK(&dhd->pub);
4392 ret = dhd_bus_start(&dhd->pub);
4393 DHD_PERIM_LOCK(&dhd->pub);
4395 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4401 if (dhd_download_fw_on_driverload) {
4402 if (dhd->pub.conf->deepsleep)
4403 dhd_deepsleep(dhd, 0);
4406 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
4407 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
4410 /* Get current TOE mode from dongle */
4411 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
4412 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
4414 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
4417 #if defined(WL_CFG80211)
4418 if (unlikely(wl_cfg80211_up(NULL))) {
4419 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
4423 dhd_set_scb_probe(&dhd->pub);
4424 #endif /* WL_CFG80211 */
4427 /* Allow transmit calls */
4428 netif_start_queue(net);
4432 dhd_dbg_init(&dhd->pub);
4435 OLD_MOD_INC_USE_COUNT;
4440 DHD_PERIM_UNLOCK(&dhd->pub);
4441 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4443 #if defined(MULTIPLE_SUPPLICANT)
4444 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
4445 mutex_unlock(&_dhd_sdio_mutex_lock_);
4447 #endif /* MULTIPLE_SUPPLICANT */
4449 printk("%s: Exit ret=%d\n", __FUNCTION__, ret);
/*
 * dhd_do_driver_init: external entry point to force driver initialization
 * on the primary interface.  No-op if the bus is already in DATA state;
 * otherwise funnels through dhd_open().  NOTE(review): capture is elided;
 * the net==NULL guard and return statements are on lines not shown.
 */
4453 int dhd_do_driver_init(struct net_device *net)
4455 dhd_info_t *dhd = NULL;
4458 DHD_ERROR(("Primary Interface not initialized \n"));
4462 #ifdef MULTIPLE_SUPPLICANT
4463 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
/* Do not race with a concurrent dhdsdio_probe. */
4464 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4465 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
4468 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
4469 #endif /* MULTIPLE_SUPPLICANT */
4471 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
4472 dhd = DHD_DEV_INFO(net);
4474 /* If driver is already initialized, do nothing
4476 if (dhd->pub.busstate == DHD_BUS_DATA) {
4477 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
4481 if (dhd_open(net) < 0) {
4482 DHD_ERROR(("Driver Init Failed \n"));
/*
 * dhd_event_ifadd: handle a firmware IF_ADD event.  cfg80211-created
 * interfaces are acknowledged via wl_cfg80211_notify_ifadd(); all others
 * are deferred to a workqueue so the DPC thread is not blocked while the
 * new netdev is registered.
 */
4490 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4494 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4498 /* handle IF event caused by wl commands, SoftAP, WEXT and
4499 * anything else. This has to be done asynchronously otherwise
4500 * DPC will be blocked (and iovars will timeout as DPC has no chance
4501 * to read the response back)
4503 if (ifevent->ifidx > 0) {
4504 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
/* NOTE(review): if_event is dereferenced below without a visible NULL
 * check after MALLOC — potential OOM crash; confirm against full source. */
4506 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4507 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4508 strncpy(if_event->name, name, IFNAMSIZ);
4509 if_event->name[IFNAMSIZ - 1] = '\0';
/* Low priority: interface add is not latency critical. */
4510 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
4511 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
/*
 * dhd_event_ifdel: handle a firmware IF_DEL event.  Mirrors
 * dhd_event_ifadd(): cfg80211 interfaces are handled via
 * wl_cfg80211_notify_ifdel(); everything else is deferred to the
 * workqueue so DPC is not blocked during netdev unregistration.
 */
4518 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4520 dhd_if_event_t *if_event;
4522 #if defined(WL_CFG80211) && !defined(P2PONEINT)
4523 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4525 #endif /* WL_CFG80211 */
4527 /* handle IF event caused by wl commands, SoftAP, WEXT and
4530 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
/* NOTE(review): if_event is dereferenced below without a visible NULL
 * check after MALLOC — potential OOM crash; confirm against full source. */
4531 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4532 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4533 strncpy(if_event->name, name, IFNAMSIZ);
4534 if_event->name[IFNAMSIZ - 1] = '\0';
4535 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
4536 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
4541 /* unregister and free the existing net_device interface (if any) in iflist and
4542 * allocate a new one. the slot is reused. this function does NOT register the
4543 * new interface to linux kernel. dhd_register_if does the job
/*
 * Returns the new net_device on success.  The ifidx slot in
 * dhdinfo->iflist is replaced; any existing netdev in the slot is freed or
 * unregistered first.  need_rtnl_lock selects unregister_netdev vs
 * unregister_netdevice (caller already holds rtnl in the latter case).
 * NOTE(review): capture is elided; error-path labels and returns are on
 * lines not shown.
 */
4546 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
4547 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
4549 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4552 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
4553 ifp = dhdinfo->iflist[ifidx];
/* Reused slot: tear down whatever netdev currently occupies it. */
4556 if (ifp->net != NULL) {
4557 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
4559 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
4561 /* in unregister_netdev case, the interface gets freed by net->destructor
4562 * (which is set to free_netdev)
4564 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4565 free_netdev(ifp->net);
4567 netif_stop_queue(ifp->net);
4569 unregister_netdev(ifp->net);
4571 unregister_netdevice(ifp->net);
4576 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
4578 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
4583 memset(ifp, 0, sizeof(dhd_if_t));
4584 ifp->info = dhdinfo;
4586 ifp->bssidx = bssidx;
4588 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
4590 /* Allocate etherdev, including space for private structure */
4591 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
4592 if (ifp->net == NULL) {
4593 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
4597 /* Setup the dhd interface's netdevice private structure. */
4598 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
4600 if (name && name[0]) {
4601 strncpy(ifp->net->name, name, IFNAMSIZ);
4602 ifp->net->name[IFNAMSIZ - 1] = '\0';
/* Destructor choice depends on build config (cfg80211 monitor support). */
4606 ifp->net->destructor = free_netdev;
4608 ifp->net->destructor = dhd_netdev_free;
4610 ifp->net->destructor = free_netdev;
4611 #endif /* WL_CFG80211 */
4612 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
4613 ifp->name[IFNAMSIZ - 1] = '\0';
4614 dhdinfo->iflist[ifidx] = ifp;
4616 #ifdef PCIE_FULL_DONGLE
4617 /* Initialize STA info list */
4618 INIT_LIST_HEAD(&ifp->sta_list);
4619 DHD_IF_STA_LIST_LOCK_INIT(ifp);
4620 #endif /* PCIE_FULL_DONGLE */
/* Error path: undo partial allocation and clear the slot. */
4626 if (ifp->net != NULL) {
4627 dhd_dev_priv_clear(ifp->net);
4628 free_netdev(ifp->net);
4631 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4634 dhdinfo->iflist[ifidx] = NULL;
4638 /* unregister and free the the net_device interface associated with the indexed
4639 * slot, also free the slot memory and set the slot pointer to NULL
/*
 * need_rtnl_lock selects unregister_netdev (takes rtnl itself) vs
 * unregister_netdevice (caller already holds rtnl).  NOTE(review): capture
 * is elided; the ifp NULL-guard and some #if branches are on lines not
 * shown.
 */
4642 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
4644 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4647 ifp = dhdinfo->iflist[ifidx];
4649 if (ifp->net != NULL) {
4650 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
4652 /* in unregister_netdev case, the interface gets freed by net->destructor
4653 * (which is set to free_netdev)
4655 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4656 free_netdev(ifp->net);
4658 netif_stop_queue(ifp->net);
4663 custom_rps_map_clear(ifp->net->_rx);
4664 #endif /* SET_RPS_CPUS */
4666 unregister_netdev(ifp->net);
4668 unregister_netdevice(ifp->net);
4673 dhd_wmf_cleanup(dhdpub, ifidx);
4674 #endif /* DHD_WMF */
/* Release any STA entries still linked to this interface. */
4676 dhd_if_del_sta_list(ifp);
4678 dhdinfo->iflist[ifidx] = NULL;
4679 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4686 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface: full open/stop/ioctl/xmit set.
 * NOTE(review): these tables are not const — confirm whether any code
 * patches them at runtime before constifying. */
4687 static struct net_device_ops dhd_ops_pri = {
4688 .ndo_open = dhd_open,
4689 .ndo_stop = dhd_stop,
4690 .ndo_get_stats = dhd_get_stats,
4691 .ndo_do_ioctl = dhd_ioctl_entry,
4692 .ndo_start_xmit = dhd_start_xmit,
4693 .ndo_set_mac_address = dhd_set_mac_address,
/* .ndo_set_multicast_list was renamed .ndo_set_rx_mode in kernel 3.2. */
4694 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4695 .ndo_set_rx_mode = dhd_set_multicast_list,
4697 .ndo_set_multicast_list = dhd_set_multicast_list,
/* Virtual (secondary) interfaces: no open/stop hooks of their own. */
4701 static struct net_device_ops dhd_ops_virt = {
4702 .ndo_get_stats = dhd_get_stats,
4703 .ndo_do_ioctl = dhd_ioctl_entry,
4704 .ndo_start_xmit = dhd_start_xmit,
4705 .ndo_set_mac_address = dhd_set_mac_address,
4706 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4707 .ndo_set_rx_mode = dhd_set_multicast_list,
4709 .ndo_set_multicast_list = dhd_set_multicast_list,
/* P2P one-interface build: virtual netdev uses cfgp2p open/stop hooks. */
4714 extern int wl_cfgp2p_if_open(struct net_device *net);
4715 extern int wl_cfgp2p_if_stop(struct net_device *net);
4717 static struct net_device_ops dhd_cfgp2p_ops_virt = {
4718 .ndo_open = wl_cfgp2p_if_open,
4719 .ndo_stop = wl_cfgp2p_if_stop,
4720 .ndo_get_stats = dhd_get_stats,
4721 .ndo_do_ioctl = dhd_ioctl_entry,
4722 .ndo_start_xmit = dhd_start_xmit,
4723 .ndo_set_mac_address = dhd_set_mac_address,
4724 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4725 .ndo_set_rx_mode = dhd_set_multicast_list,
4727 .ndo_set_multicast_list = dhd_set_multicast_list,
4730 #endif /* P2PONEINT */
4731 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4734 extern void debugger_init(void *bus_handle);
#ifdef SHOW_LOGTRACE
/* Path of the firmware log-format-strings file; overridable at insmod. */
4739 static char *logstrs_path = "/root/logstrs.bin";
4740 module_param(logstrs_path, charp, S_IRUGO);
/*
 * dhd_init_logstrs_array: load logstrs.bin (the firmware event-log format
 * strings) and build an index (temp->fmts) mapping log numbers to format
 * strings.  Supports three on-disk layouts: header-based (LOGSTRS_MAGIC),
 * legacy ROM+RAM (4324b5 only), and legacy RAM-only.  NOTE(review):
 * capture is elided; error labels and some cleanup lines are not shown.
 */
4743 dhd_init_logstrs_array(dhd_event_log_t *temp)
4745 struct file *filep = NULL;
4748 char *raw_fmts = NULL;
4749 int logstrs_size = 0;
4751 logstr_header_t *hdr = NULL;
4752 uint32 *lognums = NULL;
4753 char *logstrs = NULL;
4761 filep = filp_open(logstrs_path, O_RDONLY, 0);
4762 if (IS_ERR(filep)) {
4763 DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
4766 error = vfs_stat(logstrs_path, &stat);
4768 DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
4771 logstrs_size = (int) stat.size;
/* Read the whole file into one kernel buffer. */
4773 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
4774 if (raw_fmts == NULL) {
4775 DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
4778 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
4779 DHD_ERROR(("Error: Log strings file read failed\n"));
4783 /* Remember header from the logstrs.bin file */
/* The header, when present, lives at the END of the file. */
4784 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
4785 sizeof(logstr_header_t));
4787 if (hdr->log_magic == LOGSTRS_MAGIC) {
4789 * logstrs.bin start with header.
4791 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
4792 ram_index = (hdr->ram_lognums_offset -
4793 hdr->rom_lognums_offset) / sizeof(uint32);
4794 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
4795 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
4798 * Legacy logstrs.bin format without header.
4800 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
4801 if (num_fmts == 0) {
4802 /* Legacy ROM/RAM logstrs.bin format:
4803 * - ROM 'lognums' section
4804 * - RAM 'lognums' section
4805 * - ROM 'logstrs' section.
4806 * - RAM 'logstrs' section.
4808 * 'lognums' is an array of indexes for the strings in the
4809 * 'logstrs' section. The first uint32 is 0 (index of first
4810 * string in ROM 'logstrs' section).
4812 * The 4324b5 is the only ROM that uses this legacy format. Use the
4813 * fixed number of ROM fmtnums to find the start of the RAM
4814 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
4815 * find the ROM 'logstrs' section.
4817 #define NUM_4324B5_ROM_FMTS 186
4818 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
4819 ram_index = NUM_4324B5_ROM_FMTS;
4820 lognums = (uint32 *) raw_fmts;
4821 num_fmts = ram_index;
4822 logstrs = (char *) &raw_fmts[num_fmts << 2];
/* Scan forward 4 bytes at a time until the known first ROM string. */
4823 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
4825 logstrs = (char *) &raw_fmts[num_fmts << 2];
4828 /* Legacy RAM-only logstrs.bin format:
4829 * - RAM 'lognums' section
4830 * - RAM 'logstrs' section.
4832 * 'lognums' is an array of indexes for the strings in the
4833 * 'logstrs' section. The first uint32 is an index to the
4834 * start of 'logstrs'. Therefore, if this index is divided
4835 * by 'sizeof(uint32)' it provides the number of logstr
4839 lognums = (uint32 *) raw_fmts;
4840 logstrs = (char *) &raw_fmts[num_fmts << 2];
/* Build the lognum -> format-string pointer table. */
4843 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
4845 DHD_ERROR(("Failed to allocate fmts memory\n"));
4849 for (i = 0; i < num_fmts; i++) {
4850 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
4851 * (they are 0-indexed relative to 'rom_logstrs_offset').
4853 * RAM lognums are already indexed to point to the correct RAM logstrs (they
4854 * are 0-indexed relative to the start of the logstrs.bin file).
4856 if (i == ram_index) {
4859 fmts[i] = &logstrs[lognums[i]];
4862 temp->raw_fmts = raw_fmts;
4863 temp->num_fmts = num_fmts;
4864 filp_close(filep, NULL);
/* Error path: close the file before returning failure. */
4873 filp_close(filep, NULL);
4878 #endif /* SHOW_LOGTRACE */
/*
 * dhd_attach: allocate and initialize the per-adapter dhd_info_t.  Creates
 * the primary network interface, attaches the protocol and cfg80211/WEXT
 * layers, initializes locks, wakelocks, the watchdog timer, and the
 * worker threads (watchdog/DPC/RXF), and registers PM, inetaddr and
 * inet6addr notifiers.  On any failure it rolls back via dhd_detach()/
 * dhd_free() using the dhd_state bitmask of completed stages.
 * NOTE(review): capture is elided; many lines (gotos, #else branches,
 * closing braces, the success return) are not shown here.
 */
4882 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
4884 dhd_info_t *dhd = NULL;
4885 struct net_device *net = NULL;
4886 char if_name[IFNAMSIZ] = {'\0'};
4887 uint32 bus_type = -1;
4888 uint32 bus_num = -1;
4889 uint32 slot_num = -1;
4890 wifi_adapter_info_t *adapter = NULL;
4892 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
4893 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4895 /* will implement get_ids for DBUS later */
4896 #if defined(BCMSDIO)
4897 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
4899 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
4901 /* Allocate primary dhd_info */
/* Prefer the platform's preallocated buffer; fall back to MALLOC. */
4902 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
4904 dhd = MALLOC(osh, sizeof(dhd_info_t));
4906 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
4910 memset(dhd, 0, sizeof(dhd_info_t));
4911 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
4913 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
4916 dhd->adapter = adapter;
4918 #ifdef GET_CUSTOM_MAC_ENABLE
4919 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
4920 #endif /* GET_CUSTOM_MAC_ENABLE */
/* Mark worker threads as not-yet-created. */
4921 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
4922 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
4924 /* Initialize thread based operation and lock */
4925 sema_init(&dhd->sdsem, 1);
4927 /* Link to info module */
4928 dhd->pub.info = dhd;
4931 /* Link to bus module */
4933 dhd->pub.hdrlen = bus_hdrlen;
4935 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
4936 * because dhd_detech will check .info is NULL or not.
4938 if (dhd_conf_attach(&dhd->pub) != 0) {
4939 DHD_ERROR(("dhd_conf_attach failed\n"));
4942 dhd_conf_reset(&dhd->pub);
4943 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
4944 dhd_conf_preinit(&dhd->pub);
4946 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
4947 * This is indeed a hack but we have to make it work properly before we have a better
4950 dhd_update_fw_nv_path(dhd);
4951 #ifndef BUILD_IN_KERNEL
4952 dhd_conf_read_config(&dhd->pub, dhd->conf_path);
4955 /* Set network interface name if it was provided as module parameter */
4956 if (iface_name[0]) {
4959 strncpy(if_name, iface_name, IFNAMSIZ);
4960 if_name[IFNAMSIZ - 1] = 0;
4961 len = strlen(if_name);
4962 ch = if_name[len - 1];
/* Append "%d" so the kernel numbers the interface, unless the user
 * already supplied a trailing digit. */
4963 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
4964 strcat(if_name, "%d");
4966 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
4969 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
4971 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4974 net->netdev_ops = NULL;
4977 sema_init(&dhd->proto_sem, 1);
4979 #ifdef PROP_TXSTATUS
4980 spin_lock_init(&dhd->wlfc_spinlock);
4982 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
4983 dhd->pub.plat_init = dhd_wlfc_plat_init;
4984 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
4985 #endif /* PROP_TXSTATUS */
4987 /* Initialize other structure content */
4988 init_waitqueue_head(&dhd->ioctl_resp_wait);
4989 init_waitqueue_head(&dhd->ctrl_wait);
4991 /* Initialize the spinlocks */
4992 spin_lock_init(&dhd->sdlock);
4993 spin_lock_init(&dhd->txqlock);
4994 spin_lock_init(&dhd->dhd_lock);
4995 spin_lock_init(&dhd->rxf_lock);
4996 #if defined(RXFRAME_THREAD)
4997 dhd->rxthread_enabled = TRUE;
4998 #endif /* defined(RXFRAME_THREAD) */
5000 #ifdef DHDTCPACK_SUPPRESS
5001 spin_lock_init(&dhd->tcpack_lock);
5002 #endif /* DHDTCPACK_SUPPRESS */
5004 /* Initialize Wakelock stuff */
5005 spin_lock_init(&dhd->wakelock_spinlock);
5006 dhd->wakelock_counter = 0;
5007 dhd->wakelock_wd_counter = 0;
5008 dhd->wakelock_rx_timeout_enable = 0;
5009 dhd->wakelock_ctrl_timeout_enable = 0;
5010 #ifdef CONFIG_HAS_WAKELOCK
5011 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
5012 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
5013 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
5014 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
5015 #ifdef BCMPCIE_OOB_HOST_WAKE
5016 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
5017 #endif /* BCMPCIE_OOB_HOST_WAKE */
5018 #endif /* CONFIG_HAS_WAKELOCK */
5019 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
5020 mutex_init(&dhd->dhd_net_if_mutex);
5021 mutex_init(&dhd->dhd_suspend_mutex);
5023 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
5025 /* Attach and link in the protocol */
5026 if (dhd_prot_attach(&dhd->pub) != 0) {
5027 DHD_ERROR(("dhd_prot_attach failed\n"));
5030 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
5033 /* Attach and link in the cfg80211 */
5034 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
5035 DHD_ERROR(("wl_cfg80211_attach failed\n"));
5039 dhd_monitor_init(&dhd->pub);
5040 dhd_state |= DHD_ATTACH_STATE_CFG80211;
5042 #if defined(WL_WIRELESS_EXT)
5043 /* Attach and link in the iw */
/* WEXT is only attached when cfg80211 did not claim the interface. */
5044 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
5045 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
5046 DHD_ERROR(("wl_iw_attach failed\n"));
5049 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
5051 #endif /* defined(WL_WIRELESS_EXT) */
5053 #ifdef SHOW_LOGTRACE
5054 dhd_init_logstrs_array(&dhd->event_data);
5055 #endif /* SHOW_LOGTRACE */
5057 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
5058 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
5063 /* Set up the watchdog timer */
5064 init_timer(&dhd->timer);
5065 dhd->timer.data = (ulong)dhd;
5066 dhd->timer.function = dhd_watchdog;
5067 dhd->default_wd_interval = dhd_watchdog_ms;
5069 if (dhd_watchdog_prio >= 0) {
5070 /* Initialize watchdog thread */
5071 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
5074 dhd->thr_wdt_ctl.thr_pid = -1;
5078 debugger_init((void *) bus);
5081 /* Set up the bottom half handler */
5082 if (dhd_dpc_prio >= 0) {
5083 /* Initialize DPC thread */
5084 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
5086 /* use tasklet for dpc */
5087 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
5088 dhd->thr_dpc_ctl.thr_pid = -1;
5091 if (dhd->rxthread_enabled) {
5092 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
5093 /* Initialize RXF thread */
5094 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
5097 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
5099 #if defined(CONFIG_PM_SLEEP)
5100 if (!dhd_pm_notifier_registered) {
5101 dhd_pm_notifier_registered = TRUE;
5102 register_pm_notifier(&dhd_pm_notifier);
5104 #endif /* CONFIG_PM_SLEEP */
5106 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5107 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
5108 dhd->early_suspend.suspend = dhd_early_suspend;
5109 dhd->early_suspend.resume = dhd_late_resume;
5110 register_early_suspend(&dhd->early_suspend);
5111 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
5112 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
5114 #ifdef ARP_OFFLOAD_SUPPORT
5115 dhd->pend_ipaddr = 0;
5116 if (!dhd_inetaddr_notifier_registered) {
5117 dhd_inetaddr_notifier_registered = TRUE;
5118 register_inetaddr_notifier(&dhd_inetaddr_notifier);
5120 #endif /* ARP_OFFLOAD_SUPPORT */
5122 if (!dhd_inet6addr_notifier_registered) {
5123 dhd_inet6addr_notifier_registered = TRUE;
5124 register_inet6addr_notifier(&dhd_inet6addr_notifier);
5127 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
5128 #ifdef DEBUG_CPU_FREQ
5129 dhd->new_freq = alloc_percpu(int);
5130 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
5131 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
5133 #ifdef DHDTCPACK_SUPPRESS
/* TCP-ACK suppression mode depends on the bus: delay-TX on SDIO, hold on
 * PCIe, off otherwise. */
5135 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
5136 #elif defined(BCMPCIE)
5137 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
5139 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5140 #endif /* BCMSDIO */
5141 #endif /* DHDTCPACK_SUPPRESS */
5143 dhd_state |= DHD_ATTACH_STATE_DONE;
5144 dhd->dhd_state = dhd_state;
5147 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
5149 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/* Failure path: record how far we got and unwind. */
5153 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
5154 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
5155 __FUNCTION__, dhd_state, &dhd->pub));
5156 dhd->dhd_state = dhd_state;
5157 dhd_detach(&dhd->pub);
5158 dhd_free(&dhd->pub);
/*
 * dhd_get_fw_mode() - infer the driver operating mode from the firmware
 * image filename stored in dhdinfo->fw_path.
 *
 * Matches well-known filename suffixes in priority order and returns the
 * corresponding DHD_FLAG_* mode constant; a path with none of the suffixes
 * is treated as plain station firmware.
 *
 * NOTE(review): this listing has elided lines (original numbering jumps),
 * so the function's opening/closing braces are not visible here.
 */
5164 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
/* "_apsta" image: SoftAP / AP+STA firmware */
5166 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
5167 return DHD_FLAG_HOSTAP_MODE;
/* "_p2p" image: Wi-Fi Direct capable firmware */
5168 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
5169 return DHD_FLAG_P2P_MODE;
/* "_ibss" image: ad-hoc (IBSS) firmware */
5170 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
5171 return DHD_FLAG_IBSS_MODE;
/* "_mfg" image: manufacturing/test firmware */
5172 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
5173 return DHD_FLAG_MFG_MODE;
/* Default: ordinary STA firmware */
5175 return DHD_FLAG_STA_MODE;
5178 extern int rkwifi_set_firmware(char *fw, char *nvram);
/*
 * dhd_update_fw_nv_path() - resolve the firmware, NVRAM and config file
 * paths into dhdinfo->fw_path / nv_path / conf_path.
 *
 * Sources, in increasing priority (as far as visible here):
 *   1. platform defaults via rkwifi_set_firmware() and the
 *      CONFIG_BCMDHD_*_PATH Kconfig options,
 *   2. the adapter info (used only while the stored path is still empty),
 *   3. the firmware_path/nvram_path/config_path module parameters, which
 *      are consumed (cleared) once copied so a later change can be detected.
 *
 * Returns a bool; the return statements themselves are on elided lines,
 * presumably FALSE on the length-check failures below — TODO confirm.
 *
 * NOTE(review): the listing elides many lines (numbering jumps), including
 * the assignments made under the module-parameter checks around line 5238.
 */
5180 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
5185 const char *fw = NULL;
5186 const char *nv = NULL;
5187 const char *conf = NULL;
5188 char firmware[100] = {0};
5189 char nvram[100] = {0};
5190 //char config[100] = "";
5191 wifi_adapter_info_t *adapter = dhdinfo->adapter;
5194 /* Update firmware and nvram path. The path may be from adapter info or module parameter
5195 * The path from adapter info is used for initialization only (as it won't change).
5197 * The firmware_path/nvram_path module parameter may be changed by the system at run
5198 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
5199 * command may change dhdinfo->fw_path. As such we need to clear the path info in
5200 * module parameter after it is copied. We won't update the path until the module parameter
5201 * is changed again (first character is not '\0')
5204 /* set default firmware and nvram path for built-in type driver */
5205 // if (!dhd_download_fw_on_driverload) {
/* Rockchip platform hook: fills the local firmware[]/nvram[] buffers */
5206 rkwifi_set_firmware(firmware, nvram);
5207 #ifdef CONFIG_BCMDHD_FW_PATH
5208 fw = CONFIG_BCMDHD_FW_PATH;
5211 #endif /* CONFIG_BCMDHD_FW_PATH */
5212 #ifdef CONFIG_BCMDHD_NVRAM_PATH
5213 nv = CONFIG_BCMDHD_NVRAM_PATH;
5216 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
5219 /* check if we need to initialize the path */
/* Adapter-provided paths are only applied while the stored path is empty */
5220 if (dhdinfo->fw_path[0] == '\0') {
5221 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
5222 fw = adapter->fw_path;
5225 if (dhdinfo->nv_path[0] == '\0') {
5226 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
5227 nv = adapter->nv_path;
5229 if (dhdinfo->conf_path[0] == '\0') {
5230 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
5231 conf = adapter->conf_path;
5234 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
5236 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
/* Module parameters override everything above; the overriding assignments
 * are on elided lines — presumably fw/nv/conf = module parameter. */
5238 if (firmware_path[0] != '\0')
5240 if (nvram_path[0] != '\0')
5242 if (config_path[0] != '\0')
/* Copy each resolved path into the fixed-size dhdinfo buffers, rejecting
 * over-long paths and stripping a trailing newline (paths often come from
 * sysfs/module params with '\n' appended). */
5245 if (fw && fw[0] != '\0') {
5246 fw_len = strlen(fw);
5247 if (fw_len >= sizeof(dhdinfo->fw_path)) {
5248 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
/* NOTE(review): strncpy relies on fw_len < sizeof(fw_path) (checked above)
 * for NUL-termination of the copy. */
5251 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
5252 if (dhdinfo->fw_path[fw_len-1] == '\n')
5253 dhdinfo->fw_path[fw_len-1] = '\0';
5255 if (nv && nv[0] != '\0') {
5256 nv_len = strlen(nv);
5257 if (nv_len >= sizeof(dhdinfo->nv_path)) {
5258 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
5261 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
5262 if (dhdinfo->nv_path[nv_len-1] == '\n')
5263 dhdinfo->nv_path[nv_len-1] = '\0';
5265 if (conf && conf[0] != '\0') {
5266 conf_len = strlen(conf);
5267 if (conf_len >= sizeof(dhdinfo->conf_path)) {
5268 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
5271 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
5272 if (dhdinfo->conf_path[conf_len-1] == '\n')
5273 dhdinfo->conf_path[conf_len-1] = '\0';
5277 /* clear the path in module parameter */
/* Consume the module parameters so a future change is detectable */
5278 firmware_path[0] = '\0';
5279 nvram_path[0] = '\0';
5280 config_path[0] = '\0';
5283 #ifndef BCMEMBEDIMAGE
5284 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
5285 if (dhdinfo->fw_path[0] == '\0') {
5286 DHD_ERROR(("firmware path not found\n"));
5289 if (dhdinfo->nv_path[0] == '\0') {
5290 DHD_ERROR(("nvram path not found\n"));
/* No explicit config file: derive conf_path from nv_path */
5293 if (dhdinfo->conf_path[0] == '\0') {
5294 dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
5296 #endif /* BCMEMBEDIMAGE */
/*
 * dhd_bus_start() - bring the dongle bus fully up.
 *
 * Sequence (as visible here): resolve fw/nvram paths and download the
 * firmware image, start the watchdog timer, initialize the bus, register
 * the out-of-band interrupt (when configured), initialize PCIe flow rings
 * (full-dongle builds), run protocol init, then sync IOCTL/IOVAR state
 * with the dongle and flush any ARP-offload IP address that was pended
 * while the bus was down.
 *
 * Locking: runs under DHD_PERIM_LOCK for its whole length and takes the
 * SD lock around the bus bring-up; every visible error path unlocks before
 * bailing out (the actual `return` statements are on elided lines).
 *
 * NOTE(review): the listing elides lines (numbering jumps), including the
 * return-type line of this definition and several error-path returns.
 */
5303 dhd_bus_start(dhd_pub_t *dhdp)
5306 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
5307 unsigned long flags;
5311 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
5313 DHD_PERIM_LOCK(dhdp);
5315 /* try to download image and nvram to the dongle */
5316 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
5317 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
5318 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
5319 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
5320 dhd->fw_path, dhd->nv_path, dhd->conf_path);
/* Download-failure branch (condition line elided) */
5322 DHD_ERROR(("%s: failed to download firmware %s\n",
5323 __FUNCTION__, dhd->fw_path));
5324 DHD_PERIM_UNLOCK(dhdp);
/* Firmware must now be loaded; anything else is a fatal state */
5328 if (dhd->pub.busstate != DHD_BUS_LOAD) {
5329 DHD_PERIM_UNLOCK(dhdp);
5333 dhd_os_sdlock(dhdp);
5335 /* Start the watchdog timer */
5336 dhd->pub.tickcnt = 0;
5337 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
5339 /* Bring up the bus */
5340 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
5342 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
5343 dhd_os_sdunlock(dhdp);
5344 DHD_PERIM_UNLOCK(dhdp);
5347 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
/* PCIe OOB host-wake: interrupt registration must not hold the SD lock */
5348 #if defined(BCMPCIE_OOB_HOST_WAKE)
5349 dhd_os_sdunlock(dhdp);
5350 #endif /* BCMPCIE_OOB_HOST_WAKE */
5351 /* Host registration for OOB interrupt */
5352 if (dhd_bus_oob_intr_register(dhdp)) {
5353 /* deactivate timer and wait for the handler to finish */
5354 #if !defined(BCMPCIE_OOB_HOST_WAKE)
5355 DHD_GENERAL_LOCK(&dhd->pub, flags);
5356 dhd->wd_timer_valid = FALSE;
5357 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5358 del_timer_sync(&dhd->timer);
5360 dhd_os_sdunlock(dhdp);
5361 #endif /* BCMPCIE_OOB_HOST_WAKE */
5362 DHD_PERIM_UNLOCK(dhdp);
5363 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5364 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
5368 #if defined(BCMPCIE_OOB_HOST_WAKE)
5369 dhd_os_sdlock(dhdp);
5370 dhd_bus_oob_intr_set(dhdp, TRUE);
/* Non-PCIe branch (the #else line is elided) */
5372 /* Enable oob at firmware */
5373 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
5374 #endif /* BCMPCIE_OOB_HOST_WAKE */
5376 #ifdef PCIE_FULL_DONGLE
5379 uint32 num_flowrings; /* includes H2D common rings */
5380 num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
5381 DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
5383 if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
5384 dhd_os_sdunlock(dhdp);
5385 DHD_PERIM_UNLOCK(dhdp);
5389 #endif /* PCIE_FULL_DONGLE */
5391 /* Do protocol initialization necessary for IOCTL/IOVAR */
5392 dhd_prot_init(&dhd->pub);
5394 /* If bus is not ready, can't come up */
5395 if (dhd->pub.busstate != DHD_BUS_DATA) {
/* Roll back the watchdog started above before failing */
5396 DHD_GENERAL_LOCK(&dhd->pub, flags);
5397 dhd->wd_timer_valid = FALSE;
5398 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5399 del_timer_sync(&dhd->timer);
5400 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
5401 dhd_os_sdunlock(dhdp);
5402 DHD_PERIM_UNLOCK(dhdp);
5403 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5407 dhd_os_sdunlock(dhdp);
5409 /* Bus is ready, query any dongle information */
5410 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
5411 DHD_PERIM_UNLOCK(dhdp);
5415 #ifdef ARP_OFFLOAD_SUPPORT
/* Push any IPv4 address that arrived while the bus was still down */
5416 if (dhd->pend_ipaddr) {
5417 #ifdef AOE_IP_ALIAS_SUPPORT
5418 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
5419 #endif /* AOE_IP_ALIAS_SUPPORT */
5420 dhd->pend_ipaddr = 0;
5422 #endif /* ARP_OFFLOAD_SUPPORT */
5424 DHD_PERIM_UNLOCK(dhdp);
/*
 * _dhd_tdls_enable() - enable/disable TDLS in the dongle and, when
 * auto_on is requested, configure the TDLS auto-operation parameters
 * (idle time and RSSI connect/teardown thresholds) via iovars.
 *
 * Skips work when the firmware lacks TDLS support or the requested state
 * matches the cached dhd->tdls_enable. The `mac` parameter is unused in
 * the visible portion — presumably consumed on elided lines; TODO confirm.
 *
 * NOTE(review): the listing elides lines (numbering jumps), including the
 * auto_on branch structure and the final return.
 */
5429 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
5431 char iovbuf[WLC_IOCTL_SMLEN];
5432 uint32 tdls = tdls_on;
5434 uint32 tdls_auto_op = 0;
5435 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
5436 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
5437 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
/* Firmware capability gate: "tdls" must appear in fw_capabilities */
5439 if (!FW_SUPPORTED(dhd, tdls))
/* Already in the requested state: nothing to do */
5442 if (dhd->tdls_enable == tdls_on)
5444 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
5445 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5446 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
/* Cache the new state only after the iovar succeeded */
5449 dhd->tdls_enable = tdls_on;
/* Auto-operation setup (enclosing condition elided — presumably auto_on) */
5452 tdls_auto_op = auto_on;
5453 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
5454 iovbuf, sizeof(iovbuf));
5455 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5456 sizeof(iovbuf), TRUE, 0)) < 0) {
5457 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
5462 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
5463 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
5464 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5465 sizeof(iovbuf), TRUE, 0)) < 0) {
5466 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
5469 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
5470 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5471 sizeof(iovbuf), TRUE, 0)) < 0) {
5472 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
5475 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
5476 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5477 sizeof(iovbuf), TRUE, 0)) < 0) {
5478 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
/*
 * dhd_tdls_enable() - net_device-facing wrapper around _dhd_tdls_enable():
 * resolves the dhd_info from the netdev and forwards the TDLS request.
 * (Braces, NULL-check and return are on elided lines.)
 */
5487 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
5489 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5492 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
5497 #ifdef PCIE_FULL_DONGLE
/*
 * dhd_tdls_update_peer_info() - maintain the driver's TDLS peer table
 * (dhdp->peer_tbl) on peer connect/disconnect.
 *
 * connect == TRUE: no-op with an error log if the peer MAC `da` is already
 * listed, otherwise prepend a new node and bump tdls_peer_count.
 * connect == FALSE (disconnect path, the branch keyword itself is on an
 * elided line): delete the peer's flow rings, unlink and free its node,
 * and decrement the count; logs an error if the peer is not found.
 *
 * NOTE(review): locking around peer_tbl is not visible in this listing —
 * confirm the caller's context before assuming it is safe.
 */
5498 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
5500 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5501 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5502 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
5503 tdls_peer_node_t *new = NULL, *prev = NULL;
5505 uint8 sa[ETHER_ADDR_LEN];
5506 int ifidx = dhd_net2idx(dhd, dev);
5508 if (ifidx == DHD_BAD_IF)
5511 dhdif = dhd->iflist[ifidx];
/* Local interface MAC; usage is on elided lines — presumably for flow rings */
5512 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
/* Connect path: reject duplicates, then prepend the new peer */
5515 while (cur != NULL) {
5516 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5517 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5518 __FUNCTION__, __LINE__));
5524 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
5526 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
5529 memcpy(new->addr, da, ETHER_ADDR_LEN);
/* Insert at the head of the singly linked list */
5530 new->next = dhdp->peer_tbl.node;
5531 dhdp->peer_tbl.node = new;
5532 dhdp->peer_tbl.tdls_peer_count++;
/* Disconnect path: find, unlink and free the matching peer node */
5535 while (cur != NULL) {
5536 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5537 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
5539 prev->next = cur->next;
/* Head-of-list case (condition elided): relink the table head */
5541 dhdp->peer_tbl.node = cur->next;
5542 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
5543 dhdp->peer_tbl.tdls_peer_count--;
5549 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
5552 #endif /* PCIE_FULL_DONGLE */
/*
 * dhd_is_concurrent_mode() - report whether the driver is running in any
 * concurrent (multi- or single-channel) mode, based on dhd->op_mode flags.
 * Return statements and braces are on elided lines.
 */
5555 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
5560 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
5562 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
5563 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
5568 #if !defined(AP) && defined(WLP2P)
5569 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
5570 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
5571 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
5572 * would still be named as fw_bcmdhd_apsta.
/*
 * dhd_get_concurrent_capabilites() - probe the firmware for P2P/VSDB
 * support and return the DHD_FLAG_CONCURR_* capability bits to OR into
 * dhd->op_mode (0 when concurrency does not apply).
 *
 * Returns early (return value on elided lines) when the driver is already
 * in HOSTAP or MFG mode, or when the firmware does not support/implement
 * p2p. VSDB support upgrades the result to multi-channel concurrency.
 */
5575 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
5578 char buf[WLC_IOCTL_SMLEN];
5579 bool mchan_supported = FALSE;
5580 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5581 * test mode, that means we only will use the mode as it is
5583 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
/* VSDB (virtual simultaneous dual band) implies multi-channel support */
5585 if (FW_SUPPORTED(dhd, vsdb)) {
5586 mchan_supported = TRUE;
5588 if (!FW_SUPPORTED(dhd, p2p)) {
5589 DHD_TRACE(("Chip does not support p2p\n"));
5593 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5594 memset(buf, 0, sizeof(buf));
5595 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
5596 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5598 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
5603 /* By default, chip supports single chan concurrency,
5604 * now lets check for mchan
5606 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
5607 if (mchan_supported)
5608 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
5609 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5610 /* For customer_hw4, although ICS,
5611 * we still support concurrent mode
5624 #ifdef SUPPORT_AP_POWERSAVE
5625 #define RXCHAIN_PWRSAVE_PPS 10
5626 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
5627 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
/*
 * dhd_set_ap_powersave() - enable or disable RX-chain power save for
 * SoftAP mode via the "rxchain_pwrsave_*" iovars.
 *
 * enable != 0: turn the feature on and program its packets-per-second
 * threshold, quiet time, and associated-STA check parameters.
 * enable == 0: turn the feature off (the else keyword is on an elided
 * line). Errors are logged but not propagated in the visible code.
 * `ifidx` is unused in the visible portion — TODO confirm.
 */
5628 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
5631 int32 pps = RXCHAIN_PWRSAVE_PPS;
5632 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
5633 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
/* Enable branch: switch power save on, then set its tuning parameters */
5636 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5637 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5638 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5639 DHD_ERROR(("Failed to enable AP power save\n"));
5641 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
5642 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5643 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5644 DHD_ERROR(("Failed to set pps\n"));
5646 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
5647 4, iovbuf, sizeof(iovbuf));
5648 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5649 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5650 DHD_ERROR(("Failed to set quiet time\n"));
5652 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
5653 4, iovbuf, sizeof(iovbuf));
5654 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5655 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5656 DHD_ERROR(("Failed to set stas assoc check\n"));
/* Disable branch (else keyword elided): switch power save off */
5659 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5660 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5661 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5662 DHD_ERROR(("Failed to disable AP power save\n"));
5668 #endif /* SUPPORT_AP_POWERSAVE */
5671 #if defined(READ_CONFIG_FROM_FILE)
5672 #include <linux/fs.h>
5673 #include <linux/ctype.h>
5675 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
5676 bool PM_control = TRUE;
/*
 * dhd_preinit_proc() - apply one "name=value" setting parsed from the
 * external config file (READ_CONFIG_FROM_FILE) to the dongle.
 *
 * Dispatches on `name`:
 *   "country"          - build a wl_country_t (optional "CC/rev" form) and
 *                        set it, consulting the platform country table.
 *   "roam_scan_period",
 *   "roam_delta",
 *   "roam_trigger"     - roaming tunables via their WLC_SET_* ioctls.
 *   "PM"               - power-management mode; under CONFIG_PM_LOCK also
 *                        latches g_pm_control to stop later PM changes.
 *   "btamp_chan"       - BT-AMP channel (WLBTAMP builds).
 *   "band"             - auto/a/b/all keyword mapped to a WLC_BAND_* value.
 *   "cur_etheraddr"    - parse a MAC string and set it if it differs from
 *                        the current dhd->mac.
 *   "lpc",
 *   "vht_features"     - iovars that require a wl down/up cycle around the
 *                        set.
 *   default            - treat the value as an int and set the iovar named
 *                        `name`; "roam_off" additionally programs a 2-beacon
 *                        bcn_timeout (inner condition on an elided line).
 *
 * Returns the ioctl result for the matched setting; intermediate error
 * handling/returns are partly on elided lines. `ifidx` is unused in the
 * visible portion — TODO confirm.
 *
 * NOTE(review): this listing elides many lines (numbering jumps), so
 * several branch keywords and local declarations are not visible.
 */
5678 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
5681 wl_country_t cspec = {{0}, -1, {0}};
5683 char *endptr = NULL;
5685 char smbuf[WLC_IOCTL_SMLEN*2];
5687 if (!strcmp(name, "country")) {
/* Optional "CC/rev" syntax: text after '/' is the regulatory revision */
5688 revstr = strchr(value, '/');
5690 cspec.rev = strtoul(revstr + 1, &endptr, 10);
5691 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5692 cspec.country_abbrev[2] = '\0';
5693 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
/* No revision given: let the platform table pick abbrev/ccode/rev */
5696 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5697 memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
5698 get_customized_country_code(dhd->info->adapter,
5699 (char *)&cspec.country_abbrev, &cspec);
5701 memset(smbuf, 0, sizeof(smbuf));
5702 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
5703 cspec.country_abbrev, cspec.rev));
5704 iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
5705 smbuf, sizeof(smbuf));
5706 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5707 smbuf, iolen, TRUE, 0);
5708 } else if (!strcmp(name, "roam_scan_period")) {
5709 var_int = (int)simple_strtol(value, NULL, 0);
5710 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
5711 &var_int, sizeof(var_int), TRUE, 0);
5712 } else if (!strcmp(name, "roam_delta")) {
5717 x.val = (int)simple_strtol(value, NULL, 0);
5718 /* x.band = WLC_BAND_AUTO; */
5719 x.band = WLC_BAND_ALL;
5720 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
5721 } else if (!strcmp(name, "roam_trigger")) {
5724 roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
5725 roam_trigger[1] = WLC_BAND_ALL;
5726 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
5727 sizeof(roam_trigger), TRUE, 0);
5730 } else if (!strcmp(name, "PM")) {
5732 var_int = (int)simple_strtol(value, NULL, 0);
5734 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
5735 &var_int, sizeof(var_int), TRUE, 0);
5737 #if defined(CONFIG_PM_LOCK)
/* Latch PM control: a non-default PM value pins further PM changes
 * (enclosing condition is on an elided line) */
5739 g_pm_control = TRUE;
5740 printk("%s var_int=%d don't control PM\n", __func__, var_int);
5742 g_pm_control = FALSE;
5743 printk("%s var_int=%d do control PM\n", __func__, var_int);
5750 else if (!strcmp(name, "btamp_chan")) {
5756 btamp_chan = (int)simple_strtol(value, NULL, 0);
5757 iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
/* NOTE(review): the comparison binds inside the assignment parentheses,
 * i.e. ret = (cmd(...) < 0) — looks unintended; confirm against upstream */
5758 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
5759 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
5760 __FUNCTION__, btamp_chan, ret));
5762 DHD_ERROR(("%s btamp_chan %d set success\n",
5763 __FUNCTION__, btamp_chan));
5765 #endif /* WLBTAMP */
5766 else if (!strcmp(name, "band")) {
5768 if (!strcmp(value, "auto"))
5769 var_int = WLC_BAND_AUTO;
5770 else if (!strcmp(value, "a"))
5771 var_int = WLC_BAND_5G;
5772 else if (!strcmp(value, "b"))
5773 var_int = WLC_BAND_2G;
5774 else if (!strcmp(value, "all"))
5775 var_int = WLC_BAND_ALL;
5777 printk(" set band value should be one of the a or b or all\n");
5778 var_int = WLC_BAND_AUTO;
5780 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
5781 sizeof(var_int), TRUE, 0)) < 0)
5782 printk(" set band err=%d\n", ret);
5784 } else if (!strcmp(name, "cur_etheraddr")) {
5785 struct ether_addr ea;
5790 bcm_ether_atoe(value, &ea);
/* Skip the expensive iovar when the MAC is unchanged */
5792 ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
5794 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
5798 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
5799 ea.octet[0], ea.octet[1], ea.octet[2],
5800 ea.octet[3], ea.octet[4], ea.octet[5]));
5802 iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
5804 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
5806 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
/* Mirror the accepted MAC into the host-side copy */
5810 memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
5813 } else if (!strcmp(name, "lpc")) {
5817 var_int = (int)simple_strtol(value, NULL, 0);
/* lpc can only be changed with the interface down */
5818 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5819 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5821 iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
5822 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5823 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
5825 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5826 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
5829 } else if (!strcmp(name, "vht_features")) {
5833 var_int = (int)simple_strtol(value, NULL, 0);
/* vht_features also requires a wl down/up cycle */
5835 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5836 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5838 iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
5839 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5840 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
5842 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5843 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
/* Fallback: treat value as an integer iovar named `name` */
5848 char iovbuf[WLC_IOCTL_SMLEN];
5850 /* wlu_iovar_setint */
5851 var_int = (int)simple_strtol(value, NULL, 0);
5853 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
5854 if (!strcmp(name, "roam_off")) {
5855 /* Setup timeout if Beacons are lost to report link down */
5857 uint bcn_timeout = 2;
5858 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
5859 iovbuf, sizeof(iovbuf));
5860 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5863 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
5865 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
5867 iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
5868 iovbuf, sizeof(iovbuf));
5869 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5870 iovbuf, iovlen, TRUE, 0);
/*
 * dhd_preinit_config() - read the external config file
 * (CONFIG_BCMDHD_CONFIG_PATH), parse it as whitespace-separated
 * "name=value"-style entries, and apply each via dhd_preinit_proc().
 *
 * Stats the file to size a heap buffer, reads the whole file through the
 * dhd_os image helpers, NUL-terminates it, and tokenizes in place (the
 * separator-handling lines are partly elided). Parse errors are logged
 * with bcmerrorstr() but the visible code continues scanning.
 *
 * Cleanup path: closes the image handle and frees the buffer; `old_fs`
 * suggests a set_fs()/get_fs() address-limit dance around vfs_stat on
 * elided lines — TODO confirm. Return value lines are also elided.
 */
5876 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
5878 mm_segment_t old_fs;
5880 struct file *fp = NULL;
5882 char *buf = NULL, *p, *name, *value;
5886 config_path = CONFIG_BCMDHD_CONFIG_PATH;
5890 printk(KERN_ERR "config_path can't read. \n");
5896 if ((ret = vfs_stat(config_path, &stat))) {
5898 printk(KERN_ERR "%s: Failed to get information (%d)\n",
/* +1 for the NUL terminator appended after the read below */
5904 if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
5905 printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
5909 printk("dhd_preinit_config : config path : %s \n", config_path);
5911 if (!(fp = dhd_os_open_image(config_path)) ||
5912 (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
5915 buf[stat.size] = '\0';
/* In-place tokenizer: walk the buffer splitting name and value runs
 * (the NUL-insertion / separator lines are elided from this listing) */
5916 for (p = buf; *p; p++) {
5919 for (name = p++; *p && !isspace(*p); p++) {
5923 for (value = p; *p && !isspace(*p); p++);
5925 if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
5926 printk(KERN_ERR "%s: %s=%s\n",
5927 bcmerrorstr(ret), name, value);
/* Common cleanup (label elided): release file handle and buffer */
5937 dhd_os_close_image(fp);
5939 MFREE(dhd->osh, buf, stat.size+1);
5949 dhd_preinit_ioctls(dhd_pub_t *dhd)
5952 char eventmask[WL_EVENTING_MASK_LEN];
5953 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
5954 uint32 buf_key_b4_m4 = 1;
5959 eventmsgs_ext_t *eventmask_msg = NULL;
5960 char* iov_buf = NULL;
5963 aibss_bcn_force_config_t bcn_config;
5967 #endif /* WLAIBSS_PS */
5968 #endif /* WLAIBSS */
5969 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5972 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5973 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5974 uint32 ampdu_ba_wsize = 0;
5975 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5976 #if defined(CUSTOM_AMPDU_MPDU)
5977 int32 ampdu_mpdu = 0;
5979 #if defined(CUSTOM_AMPDU_RELEASE)
5980 int32 ampdu_release = 0;
5982 #if defined(CUSTOM_AMSDU_AGGSF)
5983 int32 amsdu_aggsf = 0;
5986 #if defined(BCMSDIO)
5987 #ifdef PROP_TXSTATUS
5988 int wlfc_enable = TRUE;
5990 uint32 hostreorder = 1;
5992 #endif /* DISABLE_11N */
5993 #endif /* PROP_TXSTATUS */
5995 #ifdef PCIE_FULL_DONGLE
5996 uint32 wl_ap_isolate;
5997 #endif /* PCIE_FULL_DONGLE */
5999 #ifdef DHD_ENABLE_LPC
6001 #endif /* DHD_ENABLE_LPC */
6002 uint power_mode = PM_FAST;
6003 uint32 dongle_align = DHD_SDALIGN;
6004 #if defined(BCMSDIO)
6005 uint32 glom = CUSTOM_GLOM_SETTING;
6006 #endif /* defined(BCMSDIO) */
6007 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6010 uint bcn_timeout = dhd->conf->bcn_timeout;
6012 #if defined(ARP_OFFLOAD_SUPPORT)
6015 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
6016 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
6017 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
6018 char buf[WLC_IOCTL_SMLEN];
6020 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
6023 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
6024 int roam_scan_period[2] = {10, WLC_BAND_ALL};
6025 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
6026 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6027 int roam_fullscan_period = 60;
6028 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6029 int roam_fullscan_period = 120;
6030 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6032 #ifdef DISABLE_BUILTIN_ROAM
6034 #endif /* DISABLE_BUILTIN_ROAM */
6035 #endif /* ROAM_ENABLE */
6040 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6041 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
6042 struct ether_addr p2p_ea;
6047 #ifdef SOFTAP_UAPSD_OFF
6048 uint32 wme_apsd = 0;
6049 #endif /* SOFTAP_UAPSD_OFF */
6050 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6051 uint32 apsta = 1; /* Enable APSTA mode */
6052 #elif defined(SOFTAP_AND_GC)
6055 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6056 #ifdef GET_CUSTOM_MAC_ENABLE
6057 struct ether_addr ea_addr;
6058 #endif /* GET_CUSTOM_MAC_ENABLE */
6062 #endif /* DISABLE_11N */
6064 #if defined(DISABLE_11AC)
6066 #endif /* DISABLE_11AC */
6069 #endif /* USE_WL_TXBF */
6070 #ifdef AMPDU_VO_ENABLE
6071 struct ampdu_tid_control tid;
6073 #ifdef USE_WL_FRAMEBURST
6074 uint32 frameburst = 1;
6075 #endif /* USE_WL_FRAMEBURST */
6076 #ifdef DHD_SET_FW_HIGHSPEED
6077 uint32 ack_ratio = 250;
6078 uint32 ack_ratio_depth = 64;
6079 #endif /* DHD_SET_FW_HIGHSPEED */
6080 #ifdef SUPPORT_2G_VHT
6081 uint32 vht_features = 0x3; /* 2G enable | rates all */
6082 #endif /* SUPPORT_2G_VHT */
6083 #ifdef CUSTOM_PSPRETEND_THR
6084 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
6086 #ifdef PKT_FILTER_SUPPORT
6087 dhd_pkt_filter_enable = TRUE;
6088 #endif /* PKT_FILTER_SUPPORT */
6090 dhd->tdls_enable = FALSE;
6092 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
6093 DHD_TRACE(("Enter %s\n", __FUNCTION__));
6095 dhd_conf_set_band(dhd);
6098 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6099 (op_mode == DHD_FLAG_MFG_MODE)) {
6100 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6101 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
6102 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6106 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
6107 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
6109 #ifdef GET_CUSTOM_MAC_ENABLE
6110 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
6112 memset(buf, 0, sizeof(buf));
6113 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
6114 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6116 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6120 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
6122 #endif /* GET_CUSTOM_MAC_ENABLE */
6123 /* Get the default device MAC address directly from firmware */
6124 memset(buf, 0, sizeof(buf));
6125 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
6126 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
6128 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
6132 /* Update public MAC address after reading from Firmware */
6133 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
6135 #ifdef GET_CUSTOM_MAC_ENABLE
6137 #endif /* GET_CUSTOM_MAC_ENABLE */
6139 /* get a capabilities from firmware */
6140 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
6141 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
6142 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
6143 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
6144 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6145 __FUNCTION__, ret));
6148 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
6149 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
6150 #ifdef SET_RANDOM_MAC_SOFTAP
6153 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
6154 #if defined(ARP_OFFLOAD_SUPPORT)
6157 #ifdef PKT_FILTER_SUPPORT
6158 dhd_pkt_filter_enable = FALSE;
6160 #ifdef SET_RANDOM_MAC_SOFTAP
6161 SRANDOM32((uint)jiffies);
6162 rand_mac = RANDOM32();
6163 iovbuf[0] = 0x02; /* locally administered bit */
6166 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
6167 iovbuf[4] = (unsigned char)(rand_mac >> 8);
6168 iovbuf[5] = (unsigned char)(rand_mac >> 16);
6170 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
6171 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6173 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6175 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
6176 #endif /* SET_RANDOM_MAC_SOFTAP */
6177 #if !defined(AP) && defined(WL_CFG80211)
6178 /* Turn off MPC in AP mode */
6179 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6180 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6181 sizeof(iovbuf), TRUE, 0)) < 0) {
6182 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
6185 #ifdef SUPPORT_AP_POWERSAVE
6186 dhd_set_ap_powersave(dhd, 0, TRUE);
6188 #ifdef SOFTAP_UAPSD_OFF
6189 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
6190 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6191 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
6192 #endif /* SOFTAP_UAPSD_OFF */
6193 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6194 (op_mode == DHD_FLAG_MFG_MODE)) {
6195 #if defined(ARP_OFFLOAD_SUPPORT)
6197 #endif /* ARP_OFFLOAD_SUPPORT */
6198 #ifdef PKT_FILTER_SUPPORT
6199 dhd_pkt_filter_enable = FALSE;
6200 #endif /* PKT_FILTER_SUPPORT */
6201 dhd->op_mode = DHD_FLAG_MFG_MODE;
6203 uint32 concurrent_mode = 0;
6204 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
6205 (op_mode == DHD_FLAG_P2P_MODE)) {
6206 #if defined(ARP_OFFLOAD_SUPPORT)
6209 #ifdef PKT_FILTER_SUPPORT
6210 dhd_pkt_filter_enable = FALSE;
6212 dhd->op_mode = DHD_FLAG_P2P_MODE;
6213 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
6214 (op_mode == DHD_FLAG_IBSS_MODE)) {
6215 dhd->op_mode = DHD_FLAG_IBSS_MODE;
6217 dhd->op_mode = DHD_FLAG_STA_MODE;
6218 #if !defined(AP) && defined(WLP2P)
6219 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
6220 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
6221 #if defined(ARP_OFFLOAD_SUPPORT)
6224 dhd->op_mode |= concurrent_mode;
6227 /* Check if we are enabling p2p */
6228 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6229 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6230 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6231 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6232 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
6235 #if defined(SOFTAP_AND_GC)
6236 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
6237 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
6238 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
6241 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
6242 ETHER_SET_LOCALADDR(&p2p_ea);
6243 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
6244 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
6245 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6246 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6247 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
6249 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
6253 (void)concurrent_mode;
6257 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
6258 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
6259 /* Set Country code */
6260 if (dhd->dhd_cspec.ccode[0] != 0) {
6261 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
6262 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
6263 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
6264 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6265 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
6267 dhd_conf_set_country(dhd);
6268 dhd_conf_fix_country(dhd);
6270 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
6272 #if defined(DISABLE_11AC)
6273 bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
6274 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6275 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
6276 #endif /* DISABLE_11AC */
6278 /* Set Listen Interval */
6279 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
6280 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6281 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
6283 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
6284 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
6285 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
6286 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6287 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
6288 #if defined(ROAM_ENABLE)
6289 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
6290 sizeof(roam_trigger), TRUE, 0)) < 0)
6291 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
6292 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
6293 sizeof(roam_scan_period), TRUE, 0)) < 0)
6294 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
6295 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
6296 sizeof(roam_delta), TRUE, 0)) < 0)
6297 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
6298 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
6299 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6300 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
6301 #endif /* ROAM_ENABLE */
6302 dhd_conf_set_roam(dhd);
6305 bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
6306 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6309 /* by default TDLS on and auto mode off */
6310 _dhd_tdls_enable(dhd, true, false, NULL);
6313 #ifdef DHD_ENABLE_LPC
6315 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
6316 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6317 sizeof(iovbuf), TRUE, 0)) < 0) {
6318 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
6320 #endif /* DHD_ENABLE_LPC */
6321 dhd_conf_set_lpc(dhd);
6323 /* Set PowerSave mode */
6324 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
6326 /* Match Host and Dongle rx alignment */
6327 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
6328 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6330 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6331 /* enable credall to reduce the chance of no bus credit happened. */
6332 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
6333 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6336 #if defined(BCMSDIO)
6337 if (glom != DEFAULT_GLOM_VALUE) {
6338 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
6339 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6340 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6342 #endif /* defined(BCMSDIO) */
6343 dhd_conf_set_bus_txglom(dhd);
6345 /* Setup timeout if Beacons are lost and roam is off to report link down */
6346 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
6347 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6348 /* Setup assoc_retry_max count to reconnect target AP in dongle */
6349 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
6350 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6351 #if defined(AP) && !defined(WLP2P)
6352 /* Turn off MPC in AP mode */
6353 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6354 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6355 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6356 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6357 #endif /* defined(AP) && !defined(WLP2P) */
6358 dhd_conf_set_mimo_bw_cap(dhd);
6359 dhd_conf_force_wme(dhd);
6360 dhd_conf_set_stbc(dhd);
6361 dhd_conf_set_srl(dhd);
6362 dhd_conf_set_lrl(dhd);
6363 dhd_conf_set_spect(dhd);
6366 if (ap_fw_loaded == TRUE) {
6367 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
6371 #if defined(KEEP_ALIVE)
6373 /* Set Keep Alive : be sure to use FW with -keepalive */
6377 if (ap_fw_loaded == FALSE)
6379 if (!(dhd->op_mode &
6380 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
6381 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
6382 DHD_ERROR(("%s set keeplive failed %d\n",
6383 __FUNCTION__, res));
6386 #endif /* defined(KEEP_ALIVE) */
6389 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
6390 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6391 sizeof(iovbuf), TRUE, 0)) < 0) {
6392 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
6394 #endif /* USE_WL_TXBF */
6395 dhd_conf_set_txbf(dhd);
6396 #ifdef USE_WL_FRAMEBURST
6397 /* Set frameburst to value */
6398 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
6399 sizeof(frameburst), TRUE, 0)) < 0) {
6400 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
6402 #endif /* USE_WL_FRAMEBURST */
6403 dhd_conf_set_frameburst(dhd);
6404 #ifdef DHD_SET_FW_HIGHSPEED
6406 bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
6407 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6408 sizeof(iovbuf), TRUE, 0)) < 0) {
6409 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
6412 /* Set ack_ratio_depth */
6413 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
6414 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6415 sizeof(iovbuf), TRUE, 0)) < 0) {
6416 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
6418 #endif /* DHD_SET_FW_HIGHSPEED */
6419 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6420 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6421 /* Set ampdu ba wsize to 64 or 16 */
6422 #ifdef CUSTOM_AMPDU_BA_WSIZE
6423 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
6425 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
6426 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
6427 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
6428 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
6429 if (ampdu_ba_wsize != 0) {
6430 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
6431 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6432 sizeof(iovbuf), TRUE, 0)) < 0) {
6433 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
6434 __FUNCTION__, ampdu_ba_wsize, ret));
6437 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6438 dhd_conf_set_ampdu_ba_wsize(dhd);
6440 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
6441 if (iov_buf == NULL) {
6442 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
6447 /* Configure custom IBSS beacon transmission */
6448 if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
6451 bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
6452 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6453 sizeof(iovbuf), TRUE, 0)) < 0) {
6454 DHD_ERROR(("%s Set aibss to %d failed %d\n",
6455 __FUNCTION__, aibss, ret));
6459 bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
6460 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6461 sizeof(iovbuf), TRUE, 0)) < 0) {
6462 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
6463 __FUNCTION__, aibss, ret));
6465 #endif /* WLAIBSS_PS */
6467 memset(&bcn_config, 0, sizeof(bcn_config));
6468 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
6469 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
6470 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
6471 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
6472 bcn_config.len = sizeof(bcn_config);
6474 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
6475 sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
6476 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
6477 WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6478 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
6479 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
6480 AIBSS_BCN_FLOOD_DUR, ret));
6482 #endif /* WLAIBSS */
6484 #if defined(CUSTOM_AMPDU_MPDU)
6485 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
6486 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
6487 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
6488 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6489 sizeof(iovbuf), TRUE, 0)) < 0) {
6490 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
6491 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
6494 #endif /* CUSTOM_AMPDU_MPDU */
6496 #if defined(CUSTOM_AMPDU_RELEASE)
6497 ampdu_release = CUSTOM_AMPDU_RELEASE;
6498 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
6499 bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
6500 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6501 sizeof(iovbuf), TRUE, 0)) < 0) {
6502 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
6503 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
6506 #endif /* CUSTOM_AMPDU_RELEASE */
6508 #if defined(CUSTOM_AMSDU_AGGSF)
6509 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
6510 if (amsdu_aggsf != 0) {
6511 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
6512 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6513 sizeof(iovbuf), TRUE, 0)) < 0) {
6514 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
6515 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
6518 #endif /* CUSTOM_AMSDU_AGGSF */
6520 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6521 /* Read 4-way handshake requirements */
6522 if (dhd_use_idsup == 1) {
6523 bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
6524 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
6525 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
6526 * in-dongle supplicant.
6528 if (ret >= 0 || ret == BCME_NOTREADY)
6529 dhd->fw_4way_handshake = TRUE;
6530 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
6532 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
6533 #ifdef SUPPORT_2G_VHT
6534 bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
6535 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6536 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
6538 #endif /* SUPPORT_2G_VHT */
6539 #ifdef CUSTOM_PSPRETEND_THR
6540 /* Turn off MPC in AP mode */
6541 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
6542 iovbuf, sizeof(iovbuf));
6543 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6544 sizeof(iovbuf), TRUE, 0)) < 0) {
6545 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
6546 __FUNCTION__, ret));
6550 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
6551 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6552 sizeof(iovbuf), TRUE, 0)) < 0) {
6553 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
6556 /* Read event_msgs mask */
6557 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6558 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
6559 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
6562 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
6564 /* Setup event_msgs */
6565 setbit(eventmask, WLC_E_SET_SSID);
6566 setbit(eventmask, WLC_E_PRUNE);
6567 setbit(eventmask, WLC_E_AUTH);
6568 setbit(eventmask, WLC_E_AUTH_IND);
6569 setbit(eventmask, WLC_E_ASSOC);
6570 setbit(eventmask, WLC_E_REASSOC);
6571 setbit(eventmask, WLC_E_REASSOC_IND);
6572 setbit(eventmask, WLC_E_DEAUTH);
6573 setbit(eventmask, WLC_E_DEAUTH_IND);
6574 setbit(eventmask, WLC_E_DISASSOC_IND);
6575 setbit(eventmask, WLC_E_DISASSOC);
6576 setbit(eventmask, WLC_E_JOIN);
6577 setbit(eventmask, WLC_E_START);
6578 setbit(eventmask, WLC_E_ASSOC_IND);
6579 setbit(eventmask, WLC_E_PSK_SUP);
6580 setbit(eventmask, WLC_E_LINK);
6581 setbit(eventmask, WLC_E_NDIS_LINK);
6582 setbit(eventmask, WLC_E_MIC_ERROR);
6583 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
6584 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
6586 setbit(eventmask, WLC_E_PMKID_CACHE);
6587 setbit(eventmask, WLC_E_TXFAIL);
6589 setbit(eventmask, WLC_E_JOIN_START);
6590 setbit(eventmask, WLC_E_SCAN_COMPLETE);
6592 setbit(eventmask, WLC_E_HTSFSYNC);
6593 #endif /* WLMEDIA_HTSF */
6595 setbit(eventmask, WLC_E_PFN_NET_FOUND);
6596 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
6597 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
6598 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
6599 #endif /* PNO_SUPPORT */
6600 /* enable dongle roaming event */
6601 setbit(eventmask, WLC_E_ROAM);
6602 setbit(eventmask, WLC_E_BSSID);
6604 setbit(eventmask, WLC_E_ADDTS_IND);
6605 setbit(eventmask, WLC_E_DELTS_IND);
6608 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
6611 setbit(eventmask, WLC_E_ESCAN_RESULT);
6612 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6613 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
6614 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
6616 #endif /* WL_CFG80211 */
6618 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
6619 #endif /* WLAIBSS */
6620 #ifdef CUSTOMER_HW10
6621 clrbit(eventmask, WLC_E_TRACE);
6623 setbit(eventmask, WLC_E_TRACE);
6625 /* Write updated Event mask */
6626 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6627 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6628 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
6632 /* make up event mask ext message iovar for event larger than 128 */
6633 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
6634 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
6635 if (eventmask_msg == NULL) {
6636 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
6640 bzero(eventmask_msg, msglen);
6641 eventmask_msg->ver = EVENTMSGS_VER;
6642 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6644 /* Read event_msgs_ext mask */
6645 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
6646 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
6647 if (ret2 != BCME_UNSUPPORTED)
6649 if (ret2 == 0) { /* event_msgs_ext must be supported */
6650 bcopy(iov_buf, eventmask_msg, msglen);
6652 #ifdef BT_WIFI_HANDOVER
6653 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
6654 #endif /* BT_WIFI_HANDOVER */
6656 /* Write updated Event mask */
6657 eventmask_msg->ver = EVENTMSGS_VER;
6658 eventmask_msg->command = EVENTMSGS_SET_MASK;
6659 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6660 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
6661 msglen, iov_buf, WLC_IOCTL_SMLEN);
6662 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6663 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6664 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
6667 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
6668 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
6670 } /* unsupported is ok */
6672 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
6673 sizeof(scan_assoc_time), TRUE, 0);
6674 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
6675 sizeof(scan_unassoc_time), TRUE, 0);
6676 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
6677 sizeof(scan_passive_time), TRUE, 0);
6679 #ifdef ARP_OFFLOAD_SUPPORT
6680 /* Set and enable ARP offload feature for STA only */
6682 if (arpoe && !ap_fw_loaded)
6687 dhd_arp_offload_enable(dhd, TRUE);
6688 dhd_arp_offload_set(dhd, dhd_arp_mode);
6690 dhd_arp_offload_enable(dhd, FALSE);
6691 dhd_arp_offload_set(dhd, 0);
6693 dhd_arp_enable = arpoe;
6694 #endif /* ARP_OFFLOAD_SUPPORT */
6696 #ifdef PKT_FILTER_SUPPORT
6697 /* Setup default defintions for pktfilter , enable in suspend */
6698 dhd->pktfilter_count = 6;
6699 /* Setup filter to allow only unicast */
6700 if (dhd_master_mode) {
6701 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
6702 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
6703 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
6704 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
6705 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6706 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6707 /* apply APP pktfilter */
6708 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
6710 dhd_conf_discard_pkt_filter(dhd);
6711 dhd_conf_add_pkt_filter(dhd);
6715 dhd_enable_packet_filter(0, dhd);
6717 #endif /* defined(SOFTAP) */
6718 dhd_set_packet_filter(dhd);
6719 #endif /* PKT_FILTER_SUPPORT */
6721 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
6722 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6723 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
6724 #endif /* DISABLE_11N */
6726 #ifdef AMPDU_VO_ENABLE
6727 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
6729 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6730 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6732 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
6734 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6735 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6737 #if defined(SOFTAP_TPUT_ENHANCE)
6738 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6739 dhd_bus_setidletime(dhd, (int)100);
6740 #ifdef DHDTCPACK_SUPPRESS
6741 dhd->tcpack_sup_enabled = FALSE;
6743 #if defined(DHD_TCP_WINSIZE_ADJUST)
6744 dhd_use_tcp_window_size_adjust = TRUE;
6747 memset(buf, 0, sizeof(buf));
6748 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
6749 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
6751 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6752 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6757 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
6759 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6763 #endif /* SOFTAP_TPUT_ENHANCE */
6765 /* query for 'ver' to get version info from firmware */
6766 memset(buf, 0, sizeof(buf));
6768 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
6769 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
6770 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6772 bcmstrtok(&ptr, "\n", 0);
6773 /* Print fw version info */
6774 DHD_ERROR(("Firmware version = %s\n", buf));
6775 dhd_set_version_info(dhd, buf);
6778 #if defined(BCMSDIO)
6779 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
6780 #endif /* defined(BCMSDIO) */
6782 dhd_conf_set_disable_proptx(dhd);
6783 #if defined(BCMSDIO)
6784 #ifdef PROP_TXSTATUS
6785 if (disable_proptx ||
6786 #ifdef PROP_TXSTATUS_VSDB
6787 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6788 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
6789 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
6790 #endif /* PROP_TXSTATUS_VSDB */
6792 wlfc_enable = FALSE;
6796 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
6797 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
6798 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6799 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
6800 if (ret2 != BCME_UNSUPPORTED)
6802 if (ret2 != BCME_OK)
6805 #endif /* DISABLE_11N */
6807 #ifdef READ_CONFIG_FROM_FILE
6808 dhd_preinit_config(dhd, 0);
6809 #endif /* READ_CONFIG_FROM_FILE */
6814 else if (hostreorder)
6815 dhd_wlfc_hostreorder_init(dhd);
6816 #endif /* DISABLE_11N */
6818 #endif /* PROP_TXSTATUS */
6819 #endif /* BCMSDIO || BCMBUS */
6820 #ifdef PCIE_FULL_DONGLE
6821 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6822 if (FW_SUPPORTED(dhd, ap)) {
6823 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
6824 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
6825 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6826 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6828 #endif /* PCIE_FULL_DONGLE */
6830 if (!dhd->pno_state) {
6835 dhd_interworking_enable(dhd);
6838 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
6844 kfree(eventmask_msg);
/*
 * dhd_iovar() - pack the named iovar plus payload into a local buffer and
 * issue it to the dongle via dhd_wl_ioctl(); on a GET (set == 0) the
 * dongle's reply is copied back into cmd_buf.
 *
 * NOTE(review): this listing is non-contiguous -- the return-type line, the
 * declarations of 'ioc'/'ret', several 'ioc' field assignments, the return
 * statement and the closing brace are not visible here.  Comments below
 * describe only the visible lines.
 */
6853 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
/* C99 VLA sized "name" + NUL + payload; assumes driver-internal callers pass
 * short, trusted name/cmd_len values -- TODO confirm no untrusted sizes. */
6855 char buf[strlen(name) + 1 + cmd_len];
6856 int len = sizeof(buf);
/* bcm_mkiovar() returns the number of bytes actually packed into buf */
6860 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
6862 memset(&ioc, 0, sizeof(ioc));
/* SET vs GET selects which WLC variable ioctl to send */
6864 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
6869 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
/* on a successful GET, hand the dongle's reply back to the caller */
6870 if (!set && ret >= 0)
6871 memcpy(cmd_buf, buf, cmd_len);
/*
 * dhd_change_mtu() - validate a requested MTU for interface ifidx.
 * Rejects the change while the netdev is running (BCME_NOTDOWN) and rejects
 * sizes outside [DHD_MIN_MTU, DHD_MAX_MTU].
 *
 * NOTE(review): listing is non-contiguous -- the lines that actually store
 * the new MTU, the function's braces and its return are not visible here.
 */
6876 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
6878 struct dhd_info *dhd = dhdp->info;
6879 struct net_device *dev = NULL;
6881 ASSERT(dhd && dhd->iflist[ifidx]);
6882 dev = dhd->iflist[ifidx]->net;
/* MTU may only be changed while the interface is administratively down */
6885 if (netif_running(dev)) {
6886 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
6887 return BCME_NOTDOWN;
/* hard bounds: standard Ethernet MTU up to 1752 -- NOTE(review): rationale
 * for the 1752 upper bound is not visible in this excerpt; confirm it
 * accounts for dongle/protocol headers. */
6890 #define DHD_MIN_MTU 1500
6891 #define DHD_MAX_MTU 1752
6893 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
6894 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
6902 #ifdef ARP_OFFLOAD_SUPPORT
6903 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * aoe_update_host_ipv4_table() - add or remove one IPv4 address (ipa) in
 * the dongle's ARP-offload host_ip table for interface idx: snapshot the
 * current table, clear it in the dongle, edit the local copy, then write
 * the surviving entries back.
 *
 * NOTE(review): listing is non-contiguous -- declarations of 'ret'/'i',
 * the store of 'ipa' into the free slot for the add case, and several
 * braces are not visible here.
 */
6905 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
6907 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
6911 bzero(ipv4_buf, sizeof(ipv4_buf));
6913 /* display what we've got */
6914 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6915 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
6917 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6919 /* now we saved host_ip table, clr it in the dongle AOE */
6920 dhd_aoe_hostip_clr(dhd_pub, idx);
6923 DHD_ERROR(("%s failed\n", __FUNCTION__));
/* walk the cached table: on add, take the first free (zero) slot;
 * on remove, drop the entry matching ipa */
6927 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
6928 if (add && (ipv4_buf[i] == 0)) {
6930 add = FALSE; /* added ipa to local table */
6931 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
6933 } else if (ipv4_buf[i] == ipa) {
6935 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
6936 __FUNCTION__, ipa, i));
/* push every remaining non-zero entry back down to the dongle */
6939 if (ipv4_buf[i] != 0) {
6940 /* add back host_ip entries from our local cache */
6941 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
6942 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
6943 __FUNCTION__, ipv4_buf[i], i));
6947 /* see the resulting hostip table */
6948 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6949 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
6950 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6955 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6956 * whenever there is an event related to an IP address.
6957 * ptr : kernel provided pointer to IP address that has changed
/*
 * dhd_inetaddr_notifier_call() - inetaddr notifier-chain callback.  On an
 * IPv4 address change for one of our netdevs, mirror the address into (or
 * out of) the dongle's ARP-offload host_ip table; if the bus is not yet up,
 * park the address in dhd->pend_ipaddr for later.
 *
 * NOTE(review): listing is non-contiguous -- the 'void *ptr' parameter
 * line, the switch(event) statement and its NETDEV_UP/NETDEV_DOWN case
 * labels, several braces and the return are not visible here.  The grouping
 * of the visible lines into cases is inferred from the log text -- confirm
 * against the full source.
 */
6959 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
6960 unsigned long event,
6963 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
/* bail out early when ARP offload is globally disabled or the event
 * carries no usable device */
6969 if (!dhd_arp_enable)
6971 if (!ifa || !(ifa->ifa_dev->dev))
6974 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6975 /* Filter notifications meant for non Broadcom devices */
6976 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
6977 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
6978 #if defined(WL_ENABLE_P2P_IF)
6979 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
6980 #endif /* WL_ENABLE_P2P_IF */
6983 #endif /* LINUX_VERSION_CODE */
6985 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
6989 dhd_pub = &dhd->pub;
/* arp_version 1 firmware apparently uses a single table; newer firmware
 * needs the per-interface index resolved below -- TODO confirm */
6991 if (dhd_pub->arp_version == 1) {
/* map the kernel net_device back to our iflist slot */
6995 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
6996 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
6999 if (idx < DHD_MAX_IFS)
7000 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
7001 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
7003 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
/* --- address-up path (case label not visible in this excerpt) --- */
7010 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7011 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
/* bus not ready yet: remember the address and apply it once the bus
 * reaches DHD_BUS_DATA (any previously pending address is overwritten) */
7013 if (dhd->pub.busstate != DHD_BUS_DATA) {
7014 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
7015 if (dhd->pend_ipaddr) {
7016 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7017 __FUNCTION__, dhd->pend_ipaddr));
7019 dhd->pend_ipaddr = ifa->ifa_address;
7023 #ifdef AOE_IP_ALIAS_SUPPORT
7024 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7026 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
7027 #endif /* AOE_IP_ALIAS_SUPPORT */
/* --- address-down path (case label not visible in this excerpt) --- */
7031 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7032 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7033 dhd->pend_ipaddr = 0;
7034 #ifdef AOE_IP_ALIAS_SUPPORT
7035 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7037 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
7039 dhd_aoe_hostip_clr(&dhd->pub, idx);
7040 dhd_aoe_arp_clr(&dhd->pub, idx);
7041 #endif /* AOE_IP_ALIAS_SUPPORT */
/* default/unhandled event: log only (message typo "noting" is a runtime
 * string and is left untouched) */
7045 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7046 __func__, ifa->ifa_label, event));
7051 #endif /* ARP_OFFLOAD_SUPPORT */
7054 /* Neighbor Discovery Offload: deferred handler */
/*
 * dhd_inet6_work_handler() - deferred-work handler for IPv6 Neighbor
 * Discovery Offload.  Depending on ndo_work->event it enables NDO and adds
 * the address to the dongle table, removes the address, or disables NDO.
 * Runs on the deferred-work thread, so the dongle ioctls may block.
 *
 * NOTE(review): listing is non-contiguous -- the case labels of the
 * switch, the guards whose error messages appear below, 'ret' declaration,
 * the kfree of ndo_work and the closing brace are not visible here; the
 * grouping of lines into cases is inferred from the log text.
 */
7056 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
7058 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
7059 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
/* sanity: this handler only services DHD_WQ_WORK_IPV6_NDO items */
7062 if (event != DHD_WQ_WORK_IPV6_NDO) {
7063 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7068 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
7073 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
/* non-zero if_idx: only the primary interface (0) is handled here */
7077 if (ndo_work->if_idx) {
7078 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
7082 switch (ndo_work->event) {
/* --- address added: enable NDO, then program the host IP --- */
7084 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
7085 ret = dhd_ndo_enable(pub, TRUE);
7087 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
7090 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
7092 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7093 __FUNCTION__, ret));
/* --- address removed: clear the table entry, then disable NDO --- */
7097 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
7098 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
7100 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7101 __FUNCTION__, ret));
7105 ret = dhd_ndo_enable(pub, FALSE);
7107 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
7112 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
7116 /* free ndo_work. alloced while scheduling the work */
7123 * Neighbor Discovery Offload: Called when an interface
7124 * is assigned an IPv6 address.
7125 * Handles only primary interface
/*
 * dhd_inet6addr_notifier_call() - inet6addr notifier-chain callback.
 * Filters for our primary netdev, checks firmware NDO support, then copies
 * the event into a kzalloc'd ipv6_work_info_t and defers the actual dongle
 * programming to dhd_inet6_work_handler() (notifiers must not block).
 *
 * NOTE(review): listing is non-contiguous -- the 'void *ptr' parameter
 * line, declarations of dhd/dhd_pub, several early returns/braces and the
 * final return are not visible here.
 */
7127 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
7128 unsigned long event,
7133 struct inet6_ifaddr *inet6_ifa = ptr;
7134 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
7135 struct ipv6_work_info_t *ndo_info;
7136 int idx = 0; /* REVISIT */
7138 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7139 /* Filter notifications meant for non Broadcom devices */
7140 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
7143 #endif /* LINUX_VERSION_CODE */
7145 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
/* idx is hard-wired to 0 (primary interface); bail if the device does not
 * match that slot */
7149 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
7151 dhd_pub = &dhd->pub;
/* firmware must advertise the "ndoe" capability */
7152 if (!FW_SUPPORTED(dhd_pub, ndoe))
/* GFP_ATOMIC: notifier context may not sleep */
7155 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
7157 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
/* snapshot the event for the deferred handler; ndo_info is freed there */
7161 ndo_info->event = event;
7162 ndo_info->if_idx = idx;
7163 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
7165 /* defer the work to thread as it may block kernel */
7166 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
7167 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
7170 #endif /* #ifdef CONFIG_IPV6 */
/*
 * dhd_register_if() - finish wiring up net_device ops, MAC address, MTU
 * headroom, ethtool/wireless-ext handlers for interface ifidx, then
 * register the netdev with the kernel (register_netdev vs
 * register_netdevice presumably chosen by need_rtnl_lock -- the selecting
 * condition is not visible here; confirm).
 *
 * NOTE(review): listing is non-contiguous -- the return-type line,
 * declarations of ifp/err, many #if/#else/brace lines and the error/return
 * paths are not visible here.  Comments describe only visible lines.
 */
7173 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
7175 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7177 struct net_device *net = NULL;
/* placeholder Broadcom OUI MAC, replaced below when a real MAC is known */
7179 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
7181 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
7183 ASSERT(dhd && dhd->iflist[ifidx]);
7184 ifp = dhd->iflist[ifidx];
7186 ASSERT(net && (ifp->idx == ifidx));
/* pre-2.6.31 kernels: individual function pointers on net_device;
 * 2.6.31+: a single net_device_ops table (dhd_ops_virt for virtual ifs) */
7189 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7191 net->get_stats = dhd_get_stats;
7192 net->do_ioctl = dhd_ioctl_entry;
7193 net->hard_start_xmit = dhd_start_xmit;
7194 net->set_mac_address = dhd_set_mac_address;
7195 net->set_multicast_list = dhd_set_multicast_list;
7196 net->open = net->stop = NULL;
7198 ASSERT(!net->netdev_ops);
7199 net->netdev_ops = &dhd_ops_virt;
7200 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7202 net->netdev_ops = &dhd_cfgp2p_ops_virt;
7203 #endif /* P2PONEINT */
7205 /* Ok, link into the network layer... */
7208 * device functions for the primary interface only
7210 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7211 net->open = dhd_open;
7212 net->stop = dhd_stop;
7214 net->netdev_ops = &dhd_ops_pri;
7215 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
/* primary interface: use the dongle MAC if one has been read */
7216 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
7217 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7220 * We have to use the primary MAC for virtual interfaces
7222 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
7224 * Android sets the locally administered bit to indicate that this is a
7225 * portable hotspot. This will not work in simultaneous AP/STA mode,
7226 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
/* virtual if sharing the primary MAC: flip the locally-administered bit
 * so the two netdevs do not present identical addresses */
7228 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
7230 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
7231 __func__, net->name));
7232 temp_addr[0] |= 0x02;
/* reserve headroom for the dongle's bus/protocol header */
7236 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
7237 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
7238 net->ethtool_ops = &dhd_ethtool_ops;
7239 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
7241 #if defined(WL_WIRELESS_EXT)
7242 #if WIRELESS_EXT < 19
7243 net->get_wireless_stats = dhd_get_wireless_stats;
7244 #endif /* WIRELESS_EXT < 19 */
7245 #if WIRELESS_EXT > 12
7246 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
7247 #endif /* WIRELESS_EXT > 12 */
7248 #endif /* defined(WL_WIRELESS_EXT) */
7250 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
7252 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
7255 printf("%s\n", dhd_version);
/* register with the kernel; register_netdevice() is the caller-holds-rtnl
 * variant -- the need_rtnl_lock test selecting between the two is not
 * visible in this excerpt */
7258 err = register_netdev(net);
7260 err = register_netdevice(net);
7263 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
7268 err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
7270 DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__, err));
7271 #endif /* SET_RPS_CPUS */
7275 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
7276 MAC2STRDBG(net->dev_addr));
7278 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
7279 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
7282 #if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
7283 KERNEL_VERSION(2, 6, 27))))
/* wake whoever is blocked in the module-init path waiting for this if */
7286 up(&dhd_registration_sem);
/* firmware is downloaded at interface-up rather than driver load:
 * power the chip back down until then */
7288 if (!dhd_download_fw_on_driverload) {
7289 dhd_net_bus_devreset(net, TRUE);
7291 dhd_net_bus_suspend(net);
7292 #endif /* BCMLXSDMMC */
7293 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
7296 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
/* error-path cleanup: drop the ops pointer set above (pre-2.6.31 guard) */
7300 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
7303 net->netdev_ops = NULL;
/*
 * dhd_bus_detach() - stop the protocol layer and the bus for this adapter.
 * (Fragmentary extract: return type, braces and some lines are elided.)
 */
7309 dhd_bus_detach(dhd_pub_t *dhdp)
7313 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7316 dhd = (dhd_info_t *)dhdp->info;
7320 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
7321 * calling stop again will cause SD read/write errors.
7323 if (dhd->pub.busstate != DHD_BUS_DOWN) {
7324 /* Stop the protocol module */
7325 dhd_prot_stop(&dhd->pub);
7327 /* Stop the bus module */
7328 dhd_bus_stop(dhd->pub.bus, TRUE);
/* Unhook the out-of-band interrupt (SDIO OOB / PCIe host-wake) if configured. */
7331 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7332 dhd_bus_oob_intr_unregister(dhdp);
/*
 * dhd_detach() - full driver teardown, the inverse of dhd_attach().
 * Unwinds only the stages recorded in dhd->dhd_state: bus/protocol,
 * notifiers, early-suspend hooks, wireless-ext, network interfaces,
 * watchdog timer, kernel threads, cfg80211, deferred workqueue,
 * PNO/PM state and wakelocks.
 * (Fragmentary extract: several closing braces and lines are elided.)
 */
7339 void dhd_detach(dhd_pub_t *dhdp)
7342 unsigned long flags;
7343 int timer_valid = FALSE;
7348 dhd = (dhd_info_t *)dhdp->info;
7352 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7354 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7356 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
7359 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
7360 /* Give sufficient time for threads to start running in case
7361 * dhd_attach() has failed
7366 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
7367 dhd_bus_detach(dhdp);
7368 #ifdef PCIE_FULL_DONGLE
7369 dhd_flow_rings_deinit(dhdp);
7373 dhd_prot_detach(dhdp);
/* Undo the IPv4/IPv6 address notifiers registered at attach time. */
7376 #ifdef ARP_OFFLOAD_SUPPORT
7377 if (dhd_inetaddr_notifier_registered) {
7378 dhd_inetaddr_notifier_registered = FALSE;
7379 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7381 #endif /* ARP_OFFLOAD_SUPPORT */
7383 if (dhd_inet6addr_notifier_registered) {
7384 dhd_inet6addr_notifier_registered = FALSE;
7385 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7389 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7390 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
7391 if (dhd->early_suspend.suspend)
7392 unregister_early_suspend(&dhd->early_suspend);
7394 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7396 #if defined(WL_WIRELESS_EXT)
7397 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
7398 /* Detach and unlink in the iw */
7401 #endif /* defined(WL_WIRELESS_EXT) */
7403 /* delete all interfaces, start with virtual */
7404 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
7408 /* Cleanup virtual interfaces */
7409 dhd_net_if_lock_local(dhd);
7410 for (i = 1; i < DHD_MAX_IFS; i++) {
7412 dhd_remove_if(&dhd->pub, i, TRUE);
7414 dhd_net_if_unlock_local(dhd);
7416 /* delete primary interface 0 */
7417 ifp = dhd->iflist[0];
7420 if (ifp && ifp->net) {
7424 /* in unregister_netdev case, the interface gets freed by net->destructor
7425 * (which is set to free_netdev)
7427 if (ifp->net->reg_state == NETREG_UNINITIALIZED)
7428 free_netdev(ifp->net);
7431 custom_rps_map_clear(ifp->net->_rx);
7432 #endif /* SET_RPS_CPUS */
7433 unregister_netdev(ifp->net);
7437 dhd_wmf_cleanup(dhdp, 0);
7438 #endif /* DHD_WMF */
7440 dhd_if_del_sta_list(ifp);
7442 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
7443 dhd->iflist[0] = NULL;
7447 /* Clear the watchdog timer */
7448 DHD_GENERAL_LOCK(&dhd->pub, flags);
7449 timer_valid = dhd->wd_timer_valid;
7450 dhd->wd_timer_valid = FALSE;
7451 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
/* del_timer_sync() must run outside the general lock to avoid deadlock
 * with the timer callback. timer_valid gating presumably happens on an
 * elided line — TODO confirm against full source.
 */
7453 del_timer_sync(&dhd->timer);
7455 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
7456 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
7457 PROC_STOP(&dhd->thr_wdt_ctl);
7460 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
7461 PROC_STOP(&dhd->thr_rxf_ctl);
7464 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
7465 PROC_STOP(&dhd->thr_dpc_ctl);
7467 tasklet_kill(&dhd->tasklet);
7470 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
7471 wl_cfg80211_detach(NULL);
7472 dhd_monitor_uninit();
7475 /* free deferred work queue */
7476 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
7477 dhd->dhd_deferred_wq = NULL;
7479 #ifdef SHOW_LOGTRACE
7480 if (dhd->event_data.fmts)
7481 kfree(dhd->event_data.fmts)7481 kfree(dhd->event_data.fmts);
7482 if (dhd->event_data.raw_fmts)
7483 kfree(dhd->event_data.raw_fmts);
7484 #endif /* SHOW_LOGTRACE */
7487 if (dhdp->pno_state)
7488 dhd_pno_deinit(dhdp);
7490 #if defined(CONFIG_PM_SLEEP)
7491 if (dhd_pm_notifier_registered) {
7492 unregister_pm_notifier(&dhd_pm_notifier);
7493 dhd_pm_notifier_registered = FALSE;
7495 #endif /* CONFIG_PM_SLEEP */
7496 #ifdef DEBUG_CPU_FREQ
7498 free_percpu(dhd->new_freq);
7499 dhd->new_freq = NULL;
7500 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7502 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
7503 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
7504 #ifdef CONFIG_HAS_WAKELOCK
/* Reset counters before destroying the locks so no late release path
 * can underflow them.
 */
7505 dhd->wakelock_counter = 0;
7506 dhd->wakelock_wd_counter = 0;
7507 dhd->wakelock_rx_timeout_enable = 0;
7508 dhd->wakelock_ctrl_timeout_enable = 0;
7509 wake_lock_destroy(&dhd->wl_wifi);
7510 wake_lock_destroy(&dhd->wl_rxwake);
7511 wake_lock_destroy(&dhd->wl_ctrlwake);
7512 wake_lock_destroy(&dhd->wl_wdwake);
7513 #ifdef BCMPCIE_OOB_HOST_WAKE
7514 wake_lock_destroy(&dhd->wl_intrwake);
7515 #endif /* BCMPCIE_OOB_HOST_WAKE */
7516 #endif /* CONFIG_HAS_WAKELOCK */
7522 #ifdef DHDTCPACK_SUPPRESS
7523 /* This will free all MEM allocated for TCPACK SUPPRESS */
7524 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7525 #endif /* DHDTCPACK_SUPPRESS */
7526 dhd_conf_detach(dhdp);
/*
 * dhd_free() - release per-adapter memory: rx reorder buffers, the STA
 * pool, and the dhd_info_t itself (unless it came from the platform
 * preallocation pool, which must not be MFREE'd).
 * (Fragmentary extract: braces and some lines are elided.)
 */
7531 dhd_free(dhd_pub_t *dhdp)
7534 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7538 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7539 if (dhdp->reorder_bufs[i]) {
7540 reorder_info_t *ptr;
7541 uint32 buf_size = sizeof(struct reorder_info);
7543 ptr = dhdp->reorder_bufs[i];
/* The reorder buffer was allocated as header + (max_idx+1) slot
 * pointers; recompute that size so MFREE gets the original length.
 */
7545 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7546 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7547 i, ptr->max_idx, buf_size));
7549 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7550 dhdp->reorder_bufs[i] = NULL;
7554 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
7556 dhd = (dhd_info_t *)dhdp->info;
7557 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
7559 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
7560 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
/*
 * dhd_clear() - like dhd_free() but only clears reusable state (reorder
 * buffers, STA pool) without releasing the dhd_info_t; used when the
 * driver instance will be reused.
 * (Fragmentary extract: braces and some lines are elided.)
 */
7566 dhd_clear(dhd_pub_t *dhdp)
7568 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7572 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7573 if (dhdp->reorder_bufs[i]) {
7574 reorder_info_t *ptr;
7575 uint32 buf_size = sizeof(struct reorder_info);
7577 ptr = dhdp->reorder_bufs[i];
7579 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7580 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7581 i, ptr->max_idx, buf_size));
7583 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7584 dhdp->reorder_bufs[i] = NULL;
/* Clears (rather than frees) the station pool so it can be reused. */
7588 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
/*
 * dhd_module_cleanup() - module-level teardown: unregister the bus and
 * the wifi platform driver. (Fragmentary extract; braces elided.)
 */
7593 dhd_module_cleanup(void)
7595 printk("%s: Enter\n", __FUNCTION__);
7597 dhd_bus_unregister();
7601 dhd_wifi_platform_unregister_drv();
7602 printk("%s: Exit\n", __FUNCTION__);
/*
 * dhd_module_exit() - module exit hook: run cleanup, then drop the
 * reboot notifier registered in dhd_module_init().
 */
7606 dhd_module_exit(void)
7608 dhd_module_cleanup();
7609 unregister_reboot_notifier(&dhd_reboot_notifier);
/*
 * dhd_module_init() - module entry point. Backs up the firmware/nvram
 * module-parameter paths, registers the platform driver (with a retry
 * loop up to POWERUP_MAX_RETRY — loop construct partially elided), and
 * registers a reboot notifier on success.
 * Returns the final error code from dhd_wifi_platform_register_drv().
 */
7613 dhd_module_init(void)
7616 int retry = POWERUP_MAX_RETRY;
7618 printk("%s: in\n", __FUNCTION__);
7620 DHD_PERIM_RADIO_INIT();
/* Snapshot the user-supplied paths; retries restore from these backups
 * because a failed probe may have modified the originals.
 * strncpy is followed by explicit NUL termination, so it is safe here.
 */
7622 if (firmware_path[0] != '\0') {
7623 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
7624 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7627 if (nvram_path[0] != '\0') {
7628 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
7629 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7633 err = dhd_wifi_platform_register_drv();
7635 register_reboot_notifier(&dhd_reboot_notifier);
7639 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
7640 __FUNCTION__, retry));
7641 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
7642 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
7643 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
7644 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
7649 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
7651 printk("%s: Exit err=%d\n", __FUNCTION__, err);
/*
 * dhd_reboot_callback() - reboot-notifier hook; reacts to SYS_RESTART
 * (body of the restart branch elided in this extract).
 */
7656 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
7658 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
7659 if (code == SYS_RESTART) {
/* Rockchip platform glue: chip-type query and version strings come from
 * the rfkill-wlan platform code.
 */
7665 #include <linux/rfkill-wlan.h>
7666 extern int get_wifi_chip_type(void);
7667 extern char WIFI_MODULE_NAME[];
7668 extern char RKWIFI_DRV_VERSION[];
7670 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
/* Thread entry used to run dhd_module_init() off the boot path
 * (body elided in this extract — presumably calls dhd_module_init()).
 */
7671 static int wifi_init_thread(void *data)
7678 int rockchip_wifi_init_module_rkwifi(void)
7680 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7681 int type = get_wifi_chip_type();
7682 if (type > WIFI_AP6XXX_SERIES) return 0;
7684 printk("=======================================================\n");
7685 printk("==== Launching Wi-Fi driver! (Powered by Rockchip) ====\n");
7686 printk("=======================================================\n");
7687 printk("%s WiFi driver (Powered by Rockchip,Ver %s) init.\n", WIFI_MODULE_NAME, RKWIFI_DRV_VERSION);
7689 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7691 struct task_struct *kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
7692 if (kthread->pid < 0)
7693 printk("create wifi_init_thread failed.\n");
7697 return dhd_module_init();
/*
 * rockchip_wifi_exit_module_rkwifi() - Rockchip platform exit point;
 * mirrors the init wrapper (the actual dhd_module_exit() call is on an
 * elided line in this extract).
 */
7701 void rockchip_wifi_exit_module_rkwifi(void)
7703 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7704 int type = get_wifi_chip_type();
7705 if (type > WIFI_AP6XXX_SERIES) return;
7707 printk("=======================================================\n");
7708 printk("== Dis-launching Wi-Fi driver! (Powered by Rockchip) ==\n");
7709 printk("=======================================================\n");
/* Module registration: when the driver is built to load at kernel boot,
 * the Rockchip wrappers are hooked as late_initcall/module_exit and also
 * exported for external callers. The commented-out variants below are the
 * historical upstream init hooks, kept for reference.
 */
7713 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7714 late_initcall(rockchip_wifi_init_module_rkwifi);
7715 module_exit(rockchip_wifi_exit_module_rkwifi);
7717 EXPORT_SYMBOL(rockchip_wifi_init_module_rkwifi);
7718 EXPORT_SYMBOL(rockchip_wifi_exit_module_rkwifi);
7720 //#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7721 //#if defined(CONFIG_DEFERRED_INITCALLS)
7722 //deferred_module_init(dhd_module_init);
7723 //#elif defined(USE_LATE_INITCALL_SYNC)
7724 //late_initcall_sync(dhd_module_init);
7726 //late_initcall(dhd_module_init);
7727 //#endif /* USE_LATE_INITCALL_SYNC */
7729 //module_init(dhd_module_init);
7730 //#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7732 //module_exit(dhd_module_exit);
7735 * OS specific functions required to implement DHD driver in OS independent way
/*
 * dhd_os_proto_block()/dhd_os_proto_unblock() - serialize protocol-layer
 * access via proto_sem, dropping the perimeter lock while sleeping on the
 * semaphore to avoid a lock-order deadlock.
 * dhd_os_get/set_ioctl_resp_timeout() - accessors for the ioctl response
 * timeout (milliseconds).
 * (Fragmentary extract: return types, braces and NULL checks are elided.)
 */
7738 dhd_os_proto_block(dhd_pub_t *pub)
7740 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7743 DHD_PERIM_UNLOCK(pub);
7745 down(&dhd->proto_sem);
7747 DHD_PERIM_LOCK(pub);
7755 dhd_os_proto_unblock(dhd_pub_t *pub)
7757 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7760 up(&dhd->proto_sem);
7768 dhd_os_get_ioctl_resp_timeout(void)
7770 return ((unsigned int)dhd_ioctl_timeout_msec);
7774 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
7776 dhd_ioctl_timeout_msec = (int)timeout_msec;
/*
 * dhd_os_ioctl_resp_wait() - block until *condition becomes true or the
 * ioctl timeout expires; returns the remaining jiffies (0 on timeout).
 * The perimeter lock is released around the sleep.
 * dhd_os_ioctl_resp_wake() - wake any waiter on the ioctl response queue.
 */
7780 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
7782 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7785 /* Convert timeout in millsecond to jiffies */
7786 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7787 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
/* Pre-2.6.27 kernels: manual ms -> jiffies conversion. */
7789 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
7792 DHD_PERIM_UNLOCK(pub);
7794 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
7796 DHD_PERIM_LOCK(pub);
7802 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
7804 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7806 wake_up(&dhd->ioctl_resp_wait);
/*
 * dhd_os_wd_timer_extend() - switch the watchdog between the extended
 * interval and the default one.
 * dhd_os_wd_timer() - arm (wdtick != 0) or fully stop (wdtick == 0) the
 * driver watchdog timer, holding the general spinlock around the state
 * flags and a wakelock while the timer is armed.
 * (Fragmentary extract: some returns/braces are elided.)
 */
7811 dhd_os_wd_timer_extend(void *bus, bool extend)
7813 dhd_pub_t *pub = bus;
7814 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7817 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
7819 dhd_os_wd_timer(bus, dhd->default_wd_interval);
7824 dhd_os_wd_timer(void *bus, uint wdtick)
7826 dhd_pub_t *pub = bus;
7827 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7828 unsigned long flags;
7830 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7833 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
7837 DHD_GENERAL_LOCK(pub, flags);
7839 /* don't start the wd until fw is loaded */
7840 if (pub->busstate == DHD_BUS_DOWN) {
7841 DHD_GENERAL_UNLOCK(pub, flags);
7843 DHD_OS_WD_WAKE_UNLOCK(pub);
7847 /* Totally stop the timer */
7848 if (!wdtick && dhd->wd_timer_valid == TRUE) {
7849 dhd->wd_timer_valid = FALSE;
/* Drop the spinlock before del_timer_sync(): the callback may itself
 * take the general lock.
 */
7850 DHD_GENERAL_UNLOCK(pub, flags);
7851 del_timer_sync(&dhd->timer);
7852 DHD_OS_WD_WAKE_UNLOCK(pub);
7857 DHD_OS_WD_WAKE_LOCK(pub);
7858 dhd_watchdog_ms = (uint)wdtick;
7859 /* Re arm the timer, at last watchdog period */
7860 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
7861 dhd->wd_timer_valid = TRUE;
7863 DHD_GENERAL_UNLOCK(pub, flags);
/*
 * Firmware image file helpers: open, sequential block read, close.
 * (Fragmentary extract: return types, braces and the filp_open error
 * check are elided.)
 */
7867 dhd_os_open_image(char *filename)
7871 fp = filp_open(filename, O_RDONLY, 0);
7873 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7875 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
7885 dhd_os_get_image_block(char *buf, int len, void *image)
7887 struct file *fp = (struct file *)image;
/* NOTE(review): this is the pre-4.14 kernel_read() signature
 * (file, offset, buf, count); kernels >= 4.14 reordered it to
 * (file, buf, count, &pos) — must be adapted when forward-porting.
 */
7893 rdlen = kernel_read(fp, fp->f_pos, buf, len);
7901 dhd_os_close_image(void *image)
7904 filp_close((struct file *)image, NULL);
/*
 * Bus/queue locking helpers. sdlock/sdunlock use the bottom-half
 * spinlock only when the DPC runs in tasklet context (dhd_dpc_prio < 0
 * falls through to the spin_lock path; the thread-context branch is on
 * elided lines). txq/rxf/tcpack locks are plain spin_lock_bh wrappers;
 * the rxq lock pair is a no-op here (bodies elided/empty).
 */
7908 dhd_os_sdlock(dhd_pub_t *pub)
7912 dhd = (dhd_info_t *)(pub->info);
7914 if (dhd_dpc_prio >= 0)
7917 spin_lock_bh(&dhd->sdlock);
7921 dhd_os_sdunlock(dhd_pub_t *pub)
7925 dhd = (dhd_info_t *)(pub->info);
7927 if (dhd_dpc_prio >= 0)
7930 spin_unlock_bh(&dhd->sdlock);
7934 dhd_os_sdlock_txq(dhd_pub_t *pub)
7938 dhd = (dhd_info_t *)(pub->info);
7939 spin_lock_bh(&dhd->txqlock);
7943 dhd_os_sdunlock_txq(dhd_pub_t *pub)
7947 dhd = (dhd_info_t *)(pub->info);
7948 spin_unlock_bh(&dhd->txqlock);
7952 dhd_os_sdlock_rxq(dhd_pub_t *pub)
7957 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
7962 dhd_os_rxflock(dhd_pub_t *pub)
7966 dhd = (dhd_info_t *)(pub->info);
7967 spin_lock_bh(&dhd->rxf_lock);
7972 dhd_os_rxfunlock(dhd_pub_t *pub)
7976 dhd = (dhd_info_t *)(pub->info);
7977 spin_unlock_bh(&dhd->rxf_lock);
7980 #ifdef DHDTCPACK_SUPPRESS
7982 dhd_os_tcpacklock(dhd_pub_t *pub)
7986 dhd = (dhd_info_t *)(pub->info);
7987 spin_lock_bh(&dhd->tcpack_lock);
7992 dhd_os_tcpackunlock(dhd_pub_t *pub)
7996 dhd = (dhd_info_t *)(pub->info);
7997 spin_unlock_bh(&dhd->tcpack_lock);
7999 #endif /* DHDTCPACK_SUPPRESS */
/*
 * dhd_os_prealloc() - fetch a buffer from the platform's static
 * preallocation pool; optionally fall back to kmalloc() when the pool
 * has no entry for this section. GFP flags depend on sleepability.
 * dhd_os_prefree() - release counterpart (body elided in this extract).
 */
8001 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
8004 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
8006 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
8008 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
8009 " size: %dbytes\n", __FUNCTION__, section, size));
8010 if (kmalloc_if_fail)
8011 buf = kmalloc(size, flags);
8017 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
8021 #if defined(WL_WIRELESS_EXT)
/*
 * dhd_get_wireless_stats() - wireless-extensions statistics callback;
 * delegates to wl_iw_get_wireless_stats() and returns the cached
 * iw_statistics (error handling on elided lines).
 */
8022 struct iw_statistics *
8023 dhd_get_wireless_stats(struct net_device *dev)
8026 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8032 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
8035 return &dhd->iw.wstats;
8039 #endif /* defined(WL_WIRELESS_EXT) */
8041 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
8043 dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
8044 wl_event_msg_t *event, void **data)
8047 char eabuf[ETHER_ADDR_STR_LEN];
8048 struct ether_addr *addr = &event->addr;
8049 uint32 type = ntoh32_ua((void *)&event->event_type);
8054 bcm_ether_ntoa(addr, eabuf);
8056 return (BCME_ERROR);
8058 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8059 if (dhd->wlanaudio_blist[cnt].is_blacklist)
8062 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
8063 addr, ETHER_ADDR_LEN)) {
8064 /* Mac address is Same */
8065 dhd->wlanaudio_blist[cnt].cnt++;
8067 if (dhd->wlanaudio_blist[cnt].cnt < 15) {
8068 /* black list is false */
8069 if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
8070 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
8072 dhd->wlanaudio_blist[cnt].is_blacklist = true;
8073 dhd->is_wlanaudio_blist = true;
8076 if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
8077 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
8080 bzero(&dhd->wlanaudio_blist[cnt],
8081 sizeof(struct wlanaudio_blacklist));
8085 } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
8086 (!dhd->wlanaudio_blist[cnt].cnt)) {
8088 (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
8090 dhd->wlanaudio_blist[cnt].cnt++;
8091 dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;
8093 bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
8099 case WLC_E_AUTH_IND :
8101 case WLC_E_DEAUTH_IND :
8103 case WLC_E_ASSOC_IND:
8105 case WLC_E_REASSOC_IND:
8106 case WLC_E_DISASSOC:
8107 case WLC_E_DISASSOC_IND:
8112 bcm_ether_ntoa(addr, eabuf);
8114 return (BCME_ERROR);
8116 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8117 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
8118 addr, ETHER_ADDR_LEN)) {
8119 /* Mac address is Same */
8120 if (dhd->wlanaudio_blist[cnt].is_blacklist) {
8121 /* black list is true */
8122 bzero(&dhd->wlanaudio_blist[cnt],
8123 sizeof(struct wlanaudio_blacklist));
8128 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8129 if (dhd->wlanaudio_blist[cnt].is_blacklist)
8135 dhd->is_wlanaudio_blist = false;
8143 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/*
 * dhd_wl_host_event() - central dispatch for dongle events: optional
 * WLANAUDIO filtering, common wl_host_event() parsing (with log-trace
 * data when SHOW_LOGTRACE is set), then delivery to wireless-ext
 * (primary bsscfg only) and/or cfg80211.
 * Returns a BCME_* code; parse failures short-circuit delivery.
 */
8145 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
8146 wl_event_msg_t *event, void **data)
8150 ASSERT(dhd != NULL);
8152 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
8153 bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);
8155 if (bcmerror != BCME_OK)
8157 #endif /* CUSTOMER_HW20 && WLANAUDIO */
8159 #ifdef SHOW_LOGTRACE
8160 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
8162 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
8163 #endif /* SHOW_LOGTRACE */
8165 if (bcmerror != BCME_OK)
8168 #if defined(WL_WIRELESS_EXT)
8169 if (event->bsscfgidx == 0) {
8171 * Wireless ext is on primary interface only
8174 ASSERT(dhd->iflist[*ifidx] != NULL);
8175 ASSERT(dhd->iflist[*ifidx]->net != NULL);
8177 if (dhd->iflist[*ifidx]->net) {
8178 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
8181 #endif /* defined(WL_WIRELESS_EXT) */
8184 ASSERT(dhd->iflist[*ifidx] != NULL);
8185 ASSERT(dhd->iflist[*ifidx]->net != NULL);
8186 if (dhd->iflist[*ifidx]->net)
8187 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
8188 #endif /* defined(WL_CFG80211) */
8193 /* send up locally generated event */
/*
 * dhd_sendup_event() - wrap a locally generated event (here: BT-AMP HCI
 * events under WLBTAMP) in a bcm_event_t Ethernet frame addressed to the
 * host's own MAC and inject it into the network stack via netif_rx /
 * direct receive depending on context.
 * (Fragmentary extract: allocation failure path braces and the netif
 * calls themselves are on elided lines.)
 */
8195 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
8197 switch (ntoh32(event->event_type)) {
8199 /* Send up locally generated AMP HCI Events */
8200 case WLC_E_BTA_HCI_EVENT: {
8201 struct sk_buff *p, *skb;
8203 wl_event_msg_t *p_bcm_event;
8212 len = ntoh32(event->datalen);
/* +2 for the trailing 0x00 0x00 "no more ethertypes" terminator. */
8213 pktlen = sizeof(bcm_event_t) + len + 2;
8215 ifidx = dhd_ifname2idx(dhd, event->ifname);
8217 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8218 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8220 msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
8222 bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
8223 bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
8224 ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
8226 msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
8228 /* BCM Vendor specific header... */
8229 msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
8230 msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
8231 bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
8233 /* vendor spec header length + pvt data length (private indication
8234 * hdr + actual message itself)
8236 msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
8237 BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
8238 msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
8240 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8242 /* copy wl_event_msg_t into sk_buf */
8244 /* pointer to wl_event_msg_t in sk_buf */
8245 p_bcm_event = &msg->event;
8246 bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
8248 /* copy hci event into sk_buf */
8249 bcopy(data, (p_bcm_event + 1), len);
8251 msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
8252 ntoh16(msg->bcm_hdr.length));
8253 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8255 ptr = (char *)(msg + 1);
8256 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
8257 * are no ethertypes which are following this
8262 skb = PKTTONATIVE(dhdp->osh, p);
/* Fall back to the primary interface when the event's ifname is unknown. */
8266 ifp = dhd->iflist[ifidx];
8268 ifp = dhd->iflist[0];
8271 skb->dev = ifp->net;
8272 skb->protocol = eth_type_trans(skb, skb->dev);
8277 /* Strip header, count, deliver upward */
8278 skb_pull(skb, ETH_HLEN);
8280 /* Send the packet */
8281 if (in_interrupt()) {
8288 /* Could not allocate a sk_buf */
8289 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
8292 } /* case WLC_E_BTA_HCI_EVENT */
8293 #endif /* WLBTAMP */
8300 #ifdef LOG_INTO_TCPDUMP
/*
 * dhd_sendup_log() - wrap a driver log buffer in a BRCM-ethertype frame
 * on the wlan0 interface and inject it into the rx path, so logs become
 * visible to tcpdump/wireshark captures.
 * NOTE(review): "ð" below is mojibake from text extraction — it stands
 * for "&eth" (address of the local ether_header); restore before
 * compiling.
 */
8302 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
8304 struct sk_buff *p, *skb;
8311 struct ether_header eth;
8313 pktlen = sizeof(eth) + data_len;
8316 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8317 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8319 bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
8320 bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
8321 ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
8322 eth.ether_type = hton16(ETHER_TYPE_BRCM);
8324 bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
8325 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
8326 skb = PKTTONATIVE(dhdp->osh, p);
8327 skb_data = skb->data;
8330 ifidx = dhd_ifname2idx(dhd, "wlan0");
8331 ifp = dhd->iflist[ifidx];
8333 ifp = dhd->iflist[0];
8336 skb->dev = ifp->net;
/* eth_type_trans() advances skb->data; restore it so the full frame
 * (header included) is what the capture sees.
 */
8337 skb->protocol = eth_type_trans(skb, skb->dev);
8338 skb->data = skb_data;
8341 /* Strip header, count, deliver upward */
8342 skb_pull(skb, ETH_HLEN);
8344 /* Send the packet */
8345 if (in_interrupt()) {
8352 /* Could not allocate a sk_buf */
8353 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
8356 #endif /* LOG_INTO_TCPDUMP */
/*
 * dhd_wait_for_event() - sleep (SDIO builds only) until *lockvar clears
 * or the ioctl timeout elapses, releasing the SD lock around the wait
 * (it is re-taken on an elided line).
 * dhd_wait_event_wakeup() - wake a waiter parked in dhd_wait_for_event().
 */
8358 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
8360 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8361 struct dhd_info *dhdinfo = dhd->info;
8363 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8364 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
8366 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
8367 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8369 dhd_os_sdunlock(dhd);
8370 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
8372 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
8376 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
8378 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8379 struct dhd_info *dhdinfo = dhd->info;
8380 if (waitqueue_active(&dhdinfo->ctrl_wait))
8381 wake_up(&dhdinfo->ctrl_wait);
8386 #if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * dhd_net_bus_devreset() - bring the dongle down (flag=TRUE: issue
 * WLC_DOWN, tear down wlfc/PNO state) or up (refresh fw/nvram paths),
 * then perform the actual bus-level reset.
 * dhd_net_bus_suspend()/resume() - thin bus power-management wrappers.
 */
8388 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
8391 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8394 /* Issue wl down command before resetting the chip */
8395 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
8396 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
8398 #ifdef PROP_TXSTATUS
8399 if (dhd->pub.wlfc_enabled)
8400 dhd_wlfc_deinit(&dhd->pub);
8401 #endif /* PROP_TXSTATUS */
8403 if (dhd->pub.pno_state)
8404 dhd_pno_deinit(&dhd->pub);
8410 dhd_update_fw_nv_path(dhd);
8411 /* update firmware and nvram path to sdio bus */
8412 dhd_bus_update_fw_nv_path(dhd->pub.bus,
8413 dhd->fw_path, dhd->nv_path, dhd->conf_path);
8415 #endif /* BCMSDIO */
8417 ret = dhd_bus_devreset(&dhd->pub, flag);
8419 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
8428 dhd_net_bus_suspend(struct net_device *dev)
8430 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8431 return dhd_bus_suspend(&dhd->pub);
8435 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
8437 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8438 return dhd_bus_resume(&dhd->pub, stage);
8441 #endif /* BCMSDIO */
8442 #endif /* BCMSDIO || BCMPCIE */
/*
 * Suspend-control wrappers exposed to the Android HAL:
 * - net_os_set_suspend_disable(): swap in a new suspend_disable_flag,
 *   returning the previous value.
 * - net_os_set_suspend(): apply suspend/resume handling, via
 *   dhd_set_suspend() on early-suspend builds or the helper otherwise,
 *   then refresh the cfg80211 power mode.
 * - net_os_set_suspend_bcn_li_dtim(): set DTIM skip count used in suspend.
 */
8444 int net_os_set_suspend_disable(struct net_device *dev, int val)
8446 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8450 ret = dhd->pub.suspend_disable_flag;
8451 dhd->pub.suspend_disable_flag = val;
8456 int net_os_set_suspend(struct net_device *dev, int val, int force)
8459 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8462 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8463 ret = dhd_set_suspend(val, &dhd->pub);
8465 ret = dhd_suspend_resume_helper(dhd, val, force);
8468 wl_cfg80211_update_power_mode(dev);
8474 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
8476 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8479 dhd->pub.suspend_bcn_li_dtim = val;
8484 #ifdef PKT_FILTER_SUPPORT
/*
 * net_os_rxfilter_add_remove() - install or delete one of the fixed
 * broadcast/multicast packet filters by slot number. In non-master mode
 * the add/remove sense is inverted. Unicast and mDNS slots are never
 * touched. Filter strings are the dongle's "pkt_filter_add" format:
 * "<id> <polarity> <type> <offset> <mask> <pattern>".
 * dhd_os_enable_packet_filter()/net_os_enable_packet_filter() - gate
 * filter activation on being up + in early-suspend, honouring
 * suspend_disable_flag.
 */
8485 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
8487 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8488 char *filterp = NULL;
8492 if (!dhd_master_mode)
8493 add_remove = !add_remove;
8495 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
8496 (num == DHD_MDNS_FILTER_NUM))
8498 if (num >= dhd->pub.pktfilter_count)
8501 case DHD_BROADCAST_FILTER_NUM:
8502 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
8505 case DHD_MULTICAST4_FILTER_NUM:
8506 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
8509 case DHD_MULTICAST6_FILTER_NUM:
8510 filterp = "103 0 0 0 0xFFFF 0x3333";
8519 dhd->pub.pktfilter[num] = filterp;
8520 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
8521 } else { /* Delete filter */
8522 if (dhd->pub.pktfilter[num] != NULL) {
8523 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
8524 dhd->pub.pktfilter[num] = NULL;
8530 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
8535 /* Packet filtering is set only if we still in early-suspend and
8536 * we need either to turn it ON or turn it OFF
8537 * We can always turn it OFF in case of early-suspend, but we turn it
8538 * back ON only if suspend_disable_flag was not set
8540 if (dhdp && dhdp->up) {
8541 if (dhdp->in_suspend) {
8542 if (!val || (val && !dhdp->suspend_disable_flag))
8543 dhd_enable_packet_filter(val, dhdp);
8549 /* function to enable/disable packet for Network device */
8550 int net_os_enable_packet_filter(struct net_device *dev, int val)
8552 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8554 return dhd_os_enable_packet_filter(&dhd->pub, val);
8556 #endif /* PKT_FILTER_SUPPORT */
/*
 * dhd_dev_init_ioctl() - run the post-init dongle synchronisation.
 * Followed by the PNO (preferred network offload) net_device wrappers,
 * each a one-line delegation from the net_device to the common dhd_pno_*
 * implementation.
 */
8559 dhd_dev_init_ioctl(struct net_device *dev)
8561 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8564 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
8572 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
8574 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
8576 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8578 return (dhd_pno_stop_for_ssid(&dhd->pub));
8580 /* Linux wrapper to call common dhd_pno_set_for_ssid */
8582 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
8583 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
8585 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8587 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
8588 pno_repeat, pno_freq_expo_max, channel_list, nchan));
8591 /* Linux wrapper to call common dhd_pno_enable */
8593 dhd_dev_pno_enable(struct net_device *dev, int enable)
8595 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8597 return (dhd_pno_enable(&dhd->pub, enable));
8600 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
8602 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
8603 struct dhd_pno_hotlist_params *hotlist_params)
8605 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8606 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
8608 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
8610 dhd_dev_pno_stop_for_batch(struct net_device *dev)
8612 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8613 return (dhd_pno_stop_for_batch(&dhd->pub));
8615 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
8617 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
8619 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8620 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
8622 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
8624 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
8626 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8627 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
8629 #endif /* PNO_SUPPORT */
8631 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
/*
 * Firmware-hang reporting chain:
 * - dhd_hang_process(): deferred-work handler that notifies userspace
 *   (wireless-ext "HANG" private event and/or cfg80211 disconnect).
 * - dhd_os_send_hang_message(): schedule that work at most once per hang.
 * - net_os_send_hang_message(): entry point; when hang_report is off,
 *   just force the bus down instead of reporting.
 */
8632 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
8635 struct net_device *dev;
8637 dhd = (dhd_info_t *)dhd_info;
8638 dev = dhd->iflist[0]->net;
8644 #if defined(WL_WIRELESS_EXT)
8645 wl_iw_send_priv_event(dev, "HANG");
8647 #if defined(WL_CFG80211)
8648 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8654 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
/* hang_was_sent guards against flooding the workqueue with duplicates. */
8658 if (!dhdp->hang_was_sent) {
8659 dhdp->hang_was_sent = 1;
8660 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
8661 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
8667 int net_os_send_hang_message(struct net_device *dev)
8669 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8673 /* Report FW problem when enabled */
8674 if (dhd->pub.hang_report) {
8675 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8676 ret = dhd_os_send_hang_message(&dhd->pub);
8678 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8681 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
8683 /* Enforce bus down to stop any future traffic */
8684 dhd->pub.busstate = DHD_BUS_DOWN;
8689 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
/*
 * net_device-level wrappers over platform/bus services: power control,
 * country-code customisation, band selection, firmware path override
 * (detecting AP-capable "apsta" images), and the interface-list mutex
 * lock/unlock pair.
 */
8692 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
8694 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8695 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
8698 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
8699 wl_country_t *cspec)
8701 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8702 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
8704 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
8706 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8707 if (dhd && dhd->pub.up) {
8708 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
8710 wl_update_wiphybands(NULL, notify);
8715 void dhd_bus_band_set(struct net_device *dev, uint band)
8717 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8718 if (dhd && dhd->pub.up) {
8720 wl_update_wiphybands(NULL, true);
8725 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
8727 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8729 if (!fw || fw[0] == '\0')
/* strncpy is safe here: explicit NUL termination follows. */
8732 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
8733 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
8736 if (strstr(fw, "apsta") != NULL) {
8737 DHD_INFO(("GOT APSTA FIRMWARE\n"));
8738 ap_fw_loaded = TRUE;
8740 DHD_INFO(("GOT STA FIRMWARE\n"));
8741 ap_fw_loaded = FALSE;
8747 void dhd_net_if_lock(struct net_device *dev)
8749 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8750 dhd_net_if_lock_local(dhd);
8753 void dhd_net_if_unlock(struct net_device *dev)
8755 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8756 dhd_net_if_unlock_local(dhd);
/*
 * Local mutex helpers (interface-list and suspend mutexes, only on
 * kernels with mutex support >= 2.6.25) and the general-purpose
 * irqsave spinlock used by DHD_GENERAL_LOCK/UNLOCK.
 */
8759 static void dhd_net_if_lock_local(dhd_info_t *dhd)
8761 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8763 mutex_lock(&dhd->dhd_net_if_mutex);
8767 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
8769 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8771 mutex_unlock(&dhd->dhd_net_if_mutex);
8775 static void dhd_suspend_lock(dhd_pub_t *pub)
8777 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8778 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8780 mutex_lock(&dhd->dhd_suspend_mutex);
8784 static void dhd_suspend_unlock(dhd_pub_t *pub)
8786 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8787 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8789 mutex_unlock(&dhd->dhd_suspend_mutex);
8793 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
8795 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8796 unsigned long flags = 0;
8799 spin_lock_irqsave(&dhd->dhd_lock, flags);
8804 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
8806 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8809 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
8812 /* Linux specific multipurpose spinlock API */
8814 dhd_os_spin_lock_init(osl_t *osh)
8816 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8817 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8818 /* and this results in kernel asserts in internal builds */
8819 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
8821 spin_lock_init(lock);
8822 return ((void *)lock);
8825 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
8827 MFREE(osh, lock, sizeof(spinlock_t) + 4);
8830 dhd_os_spin_lock(void *lock)
8832 unsigned long flags = 0;
8835 spin_lock_irqsave((spinlock_t *)lock, flags);
8840 dhd_os_spin_unlock(void *lock, unsigned long flags)
8843 spin_unlock_irqrestore((spinlock_t *)lock, flags);
/* Number of 802.1X (EAPOL) frames still in flight on this instance. */
8847 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
8849 return (atomic_read(&dhd->pend_8021x_cnt));
8852 #define MAX_WAIT_FOR_8021X_TX 100
/*
 * Block until all pending 802.1X frames have been transmitted, polling in
 * 10 ms interruptible sleeps up to MAX_WAIT_FOR_8021X_TX iterations
 * (~1 second total).  The perimeter lock is dropped around each sleep so
 * transmit completion can make progress.  On timeout the counter is force
 * reset to 0 and an error is logged.
 */
8855 dhd_wait_pend8021x(struct net_device *dev)
8857 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8858 int timeout = msecs_to_jiffies(10);
8859 int ntimes = MAX_WAIT_FOR_8021X_TX;
8860 int pend = dhd_get_pend_8021x_cnt(dhd);
8862 while (ntimes && pend) {
8864 set_current_state(TASK_INTERRUPTIBLE);
/* Release perimeter lock while sleeping so TX completions can run. */
8865 DHD_PERIM_UNLOCK(&dhd->pub);
8866 schedule_timeout(timeout);
8867 DHD_PERIM_LOCK(&dhd->pub);
8868 set_current_state(TASK_RUNNING);
8871 pend = dhd_get_pend_8021x_cnt(dhd);
/* Timed out: clear the stuck counter rather than wait forever. */
8875 atomic_set(&dhd->pend_8021x_cnt, 0);
8876 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
/*
 * Debug-only helper: dump 'size' bytes of 'buf' to /tmp/mem_dump (mode 0640),
 * then free the buffer.  Temporarily widens the address limit to KERNEL_DS so
 * the VFS write accepts a kernel-space buffer (legacy set_fs() pattern, valid
 * for the kernel versions this driver targets).
 * Ownership: 'buf' is consumed (MFREEd) whether or not the write succeeds --
 * NOTE(review): the error path on open is partially elided here; confirm the
 * buffer is also released in that branch in the full source.
 */
8883 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
8887 mm_segment_t old_fs;
8890 /* change to KERNEL_DS address limit */
8894 /* open file to write */
8895 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
8897 printf("%s: open file error\n", __FUNCTION__);
8902 /* Write buf to file */
8903 fp->f_op->write(fp, buf, size, &pos);
8906 /* free buf before return */
8907 MFREE(dhd->osh, buf, size);
8908 /* close file before return */
8910 filp_close(fp, current->files);
8911 /* restore previous address limit */
8916 #endif /* DHD_DEBUG */
/*
 * Arm the deferred RX/ctrl wakelock timeouts that were requested earlier via
 * the *_timeout_enable setters.  Under the wakelock spinlock: takes timed
 * wakelocks for whichever of rx/ctrl has a nonzero pending timeout (ms),
 * then clears both pending values.  Returns the larger of the two pending
 * timeouts as they were before clearing.
 */
8918 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
8920 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8921 unsigned long flags;
8925 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8926 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
8927 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
8928 #ifdef CONFIG_HAS_WAKELOCK
8929 if (dhd->wakelock_rx_timeout_enable)
8930 wake_lock_timeout(&dhd->wl_rxwake,
8931 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
8932 if (dhd->wakelock_ctrl_timeout_enable)
8933 wake_lock_timeout(&dhd->wl_ctrlwake,
8934 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
/* One-shot: pending requests are consumed here. */
8936 dhd->wakelock_rx_timeout_enable = 0;
8937 dhd->wakelock_ctrl_timeout_enable = 0;
8938 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device-based wrapper for the above. */
8943 int net_os_wake_lock_timeout(struct net_device *dev)
8945 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8949 ret = dhd_os_wake_lock_timeout(&dhd->pub);
/*
 * Request an RX wakelock timeout of 'val' ms; only ever raises the pending
 * value (monotonic max), never lowers it.
 */
8953 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
8955 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8956 unsigned long flags;
8959 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8960 if (val > dhd->wakelock_rx_timeout_enable)
8961 dhd->wakelock_rx_timeout_enable = val;
8962 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Same as above, for the control-path wakelock. */
8967 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
8969 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8970 unsigned long flags;
8973 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8974 if (val > dhd->wakelock_ctrl_timeout_enable)
8975 dhd->wakelock_ctrl_timeout_enable = val;
8976 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Cancel any pending ctrl timeout and drop the ctrl wakelock if held. */
8981 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
8983 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8984 unsigned long flags;
8987 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8988 dhd->wakelock_ctrl_timeout_enable = 0;
8989 #ifdef CONFIG_HAS_WAKELOCK
8990 if (wake_lock_active(&dhd->wl_ctrlwake))
8991 wake_unlock(&dhd->wl_ctrlwake);
8993 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device-based wrappers for the two enable calls. */
8998 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
9000 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9004 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
9008 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
9010 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9014 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
/*
 * Reference-counted main wakelock.  The first acquisition (counter 0 -> 1)
 * takes the underlying OS wakelock -- unless 'waive_wakelock' is set, in
 * which case only the counter moves (see dhd_os_wake_lock_waive/restore).
 * Falls back to the runtime-PM stay-awake API on BCMSDIO kernels > 2.6.36
 * when CONFIG_HAS_WAKELOCK is absent.  Returns the new counter value.
 */
9018 int dhd_os_wake_lock(dhd_pub_t *pub)
9020 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9021 unsigned long flags;
9025 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9027 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9028 #ifdef CONFIG_HAS_WAKELOCK
9029 wake_lock(&dhd->wl_wifi);
9030 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9031 dhd_bus_dev_pm_stay_awake(pub);
9034 dhd->wakelock_counter++;
9035 ret = dhd->wakelock_counter;
9036 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device-based wrapper. */
9041 int net_os_wake_lock(struct net_device *dev)
9043 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9047 ret = dhd_os_wake_lock(&dhd->pub);
/*
 * Drop one reference; releases the OS wakelock when the count reaches 0
 * (again subject to waive).  Also arms any pending rx/ctrl timed wakelocks
 * first, so a just-finished burst keeps the host awake briefly.
 */
9051 int dhd_os_wake_unlock(dhd_pub_t *pub)
9053 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9054 unsigned long flags;
9057 dhd_os_wake_lock_timeout(pub);
9059 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9060 if (dhd->wakelock_counter > 0) {
9061 dhd->wakelock_counter--;
9062 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9063 #ifdef CONFIG_HAS_WAKELOCK
9064 wake_unlock(&dhd->wl_wifi);
9065 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9066 dhd_bus_dev_pm_relax(pub);
9069 ret = dhd->wakelock_counter;
9071 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/*
 * Probe whether internal wakelocks (wifi/watchdog) are held, so the SDIO
 * host can veto suspend.  Compiles to a stub when neither wakelock nor the
 * runtime-PM fallback is available.
 */
9076 int dhd_os_check_wakelock(dhd_pub_t *pub)
9078 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9079 KERNEL_VERSION(2, 6, 36)))
9084 dhd = (dhd_info_t *)(pub->info);
9085 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9087 #ifdef CONFIG_HAS_WAKELOCK
9088 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9089 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
9090 (wake_lock_active(&dhd->wl_wdwake))))
9092 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9093 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
/* Like dhd_os_check_wakelock but also counts the rx and ctrl wakelocks. */
9099 int dhd_os_check_wakelock_all(dhd_pub_t *pub)
9101 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9102 KERNEL_VERSION(2, 6, 36)))
9107 dhd = (dhd_info_t *)(pub->info);
9108 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9110 #ifdef CONFIG_HAS_WAKELOCK
9111 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9112 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
9113 wake_lock_active(&dhd->wl_wdwake) ||
9114 wake_lock_active(&dhd->wl_rxwake) ||
9115 wake_lock_active(&dhd->wl_ctrlwake))) {
9118 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9119 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
/* net_device-based wrapper for dhd_os_wake_unlock. */
9125 int net_os_wake_unlock(struct net_device *dev)
9127 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9131 ret = dhd_os_wake_unlock(&dhd->pub);
/*
 * Watchdog wakelock: counted acquire -- the OS lock is taken only on the
 * first reference.  Returns the new count.
 */
9135 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
9137 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9138 unsigned long flags;
9142 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9143 #ifdef CONFIG_HAS_WAKELOCK
9144 /* if wakelock_wd_counter was never used : lock it at once */
9145 if (!dhd->wakelock_wd_counter)
9146 wake_lock(&dhd->wl_wdwake);
9148 dhd->wakelock_wd_counter++;
9149 ret = dhd->wakelock_wd_counter;
9150 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/*
 * Watchdog wakelock release: unlike the main wakelock this is not
 * decrement-by-one -- any nonzero count is zeroed and the lock dropped.
 */
9155 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
9157 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9158 unsigned long flags;
9162 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9163 if (dhd->wakelock_wd_counter) {
9164 dhd->wakelock_wd_counter = 0;
9165 #ifdef CONFIG_HAS_WAKELOCK
9166 wake_unlock(&dhd->wl_wdwake);
9169 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9174 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the out-of-band host-wake interrupt wakelock for 'val' ms. */
9175 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
9177 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9181 #ifdef CONFIG_HAS_WAKELOCK
9182 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
/* Drop the OOB interrupt wakelock early if it is still active. */
9188 int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
9190 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9194 #ifdef CONFIG_HAS_WAKELOCK
9195 /* if wl_intrwake is active, unlock it */
9196 if (wake_lock_active(&dhd->wl_intrwake)) {
9197 wake_unlock(&dhd->wl_intrwake);
9203 #endif /* BCMPCIE_OOB_HOST_WAKE */
9205 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
9206 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
/*
 * Enter "waived" mode: wakelock counting continues, but the underlying OS
 * wakelock is not taken/dropped.  Snapshots the counter so restore can
 * reconcile.  Idempotent if already waived.  Returns the wd counter.
 */
9208 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
9210 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9211 unsigned long flags;
9215 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9216 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9217 if (dhd->waive_wakelock == FALSE) {
9218 /* record current lock status */
9219 dhd->wakelock_before_waive = dhd->wakelock_counter;
9220 dhd->waive_wakelock = TRUE;
9222 ret = dhd->wakelock_wd_counter;
9223 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/*
 * Leave "waived" mode and reconcile the OS wakelock with whatever happened
 * to the counter while waived: if references appeared (0 -> >0) take the
 * lock now; if all references disappeared (>0 -> 0) release it.
 */
9228 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
9230 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9231 unsigned long flags;
9237 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9238 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9239 if (!dhd->waive_wakelock)
9242 dhd->waive_wakelock = FALSE;
9243 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
9244 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
9245 * the lock in between, do the same by calling wake_unlock or pm_relax
9247 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
9248 #ifdef CONFIG_HAS_WAKELOCK
9249 wake_lock(&dhd->wl_wifi);
9250 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9251 dhd_bus_dev_pm_stay_awake(&dhd->pub);
9253 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
9254 #ifdef CONFIG_HAS_WAKELOCK
9255 wake_unlock(&dhd->wl_wifi);
9256 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9257 dhd_bus_dev_pm_relax(&dhd->pub);
9260 dhd->wakelock_before_waive = 0;
9262 ret = dhd->wakelock_wd_counter;
9263 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Report whether the dongle is up; body not visible in this snippet. */
9267 bool dhd_os_check_if_up(dhd_pub_t *pub)
9274 /* function to collect firmware, chip id and chip version info */
/*
 * Build and print the version banner (driver version, firmware string),
 * then append chip id/rev/package into the same info_string buffer.
 * snprintf chaining: 'i' carries the running offset into info_string.
 */
9275 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
9279 i = snprintf(info_string, sizeof(info_string),
9280 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
9281 printf("%s\n", info_string);
9286 i = snprintf(&info_string[i], sizeof(info_string) - i,
9287 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
9288 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
/*
 * In-driver IOCTL entry point (no user-space copy): validates the net
 * device and its interface index, then forwards to dhd_wl_ioctl under the
 * wakelock + perimeter lock, feeding the result to the hang detector.
 */
9291 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
9295 dhd_info_t *dhd = NULL;
9297 if (!net || !DEV_PRIV(net)) {
9298 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
9302 dhd = DHD_DEV_INFO(net);
9306 ifidx = dhd_net2idx(dhd, net);
9307 if (ifidx == DHD_BAD_IF) {
9308 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
/* Keep the host awake and hold the perimeter for the bus transaction. */
9312 DHD_OS_WAKE_LOCK(&dhd->pub);
9313 DHD_PERIM_LOCK(&dhd->pub);
9315 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
/* Let the hang detector inspect the ioctl outcome. */
9316 dhd_check_hang(net, &dhd->pub, ret);
9318 DHD_PERIM_UNLOCK(&dhd->pub);
9319 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * Map an interface index to its net_device and run the hang check on it;
 * logs and bails if the index does not resolve.
 */
9324 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
9326 struct net_device *net;
9328 net = dhd_idx2net(dhdp, ifidx);
9330 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
9334 return dhd_check_hang(net, dhdp, ret);
9337 /* Return instance */
/* Return the driver instance (unit) number for this pub. */
9338 int dhd_get_instance(dhd_pub_t *dhdp)
9340 return dhdp->info->unit;
9344 #ifdef PROP_TXSTATUS
/* Proprietary TX-status flow control platform hooks; bodies elided here. */
9346 void dhd_wlfc_plat_init(void *dhd)
9351 void dhd_wlfc_plat_deinit(void *dhd)
9356 bool dhd_wlfc_skip_fc(void)
9360 #endif /* PROP_TXSTATUS */
/*
 * BCMDBGFS: expose dongle backplane registers through debugfs at
 * /sys/kernel/debug/dhd/mem.  Reads/writes go through dhd_readregl /
 * dhd_writeregl on the global g_dbgfs state; all accesses are forced to
 * 4-byte alignment.
 */
9364 #include <linux/debugfs.h>
9366 extern uint32 dhd_readregl(void *bp, uint32 addr);
9367 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
9369 typedef struct dhd_dbgfs {
9370 struct dentry *debugfs_dir;
9371 struct dentry *debugfs_mem;
/* Singleton debugfs state (one dongle instance supported). */
9376 dhd_dbgfs_t g_dbgfs;
/* Standard debugfs open: stash i_private as the file's private data. */
9379 dhd_dbg_state_open(struct inode *inode, struct file *file)
9381 file->private_data = inode->i_private;
/*
 * Read handler: returns register contents at the (4-byte aligned) current
 * file position.  NOTE(review): the register address is taken from
 * file->f_pos rather than the local 'pos'/*ppos -- confirm this is
 * intentional in the full source.
 */
9386 dhd_dbg_state_read(struct file *file, char __user *ubuf,
9387 size_t count, loff_t *ppos)
9396 if (pos >= g_dbgfs.size || !count)
9398 if (count > g_dbgfs.size - pos)
9399 count = g_dbgfs.size - pos;
9401 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
9402 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
9404 ret = copy_to_user(ubuf, &tmp, 4);
9409 *ppos = pos + count;
/* Write handler: stores one 32-bit value at the aligned file position. */
9417 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
9425 if (pos >= g_dbgfs.size || !count)
9427 if (count > g_dbgfs.size - pos)
9428 count = g_dbgfs.size - pos;
9430 ret = copy_from_user(&buf, ubuf, sizeof(uint32))
9434 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
9435 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
/* llseek supporting SEEK_CUR and SEEK_END within [0, g_dbgfs.size]. */
9442 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
9451 pos = file->f_pos + off;
9454 pos = g_dbgfs.size - off;
9456 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
9459 static const struct file_operations dhd_dbg_state_ops = {
9460 .read = dhd_dbg_state_read,
9461 .write = dhd_debugfs_write,
9462 .open = dhd_dbg_state_open,
9463 .llseek = dhd_debugfs_lseek
/* Create the "mem" file once the "dhd" directory exists. */
9466 static void dhd_dbg_create(void)
9468 if (g_dbgfs.debugfs_dir) {
9469 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
9470 NULL, &dhd_dbg_state_ops);
/* Set up the debugfs dir; 0x20000000 covers the core register window. */
9474 void dhd_dbg_init(dhd_pub_t *dhdp)
9478 g_dbgfs.dhdp = dhdp;
9479 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
9481 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
9482 if (IS_ERR(g_dbgfs.debugfs_dir)) {
9483 err = PTR_ERR(g_dbgfs.debugfs_dir);
9484 g_dbgfs.debugfs_dir = NULL;
/* Tear everything down and clear the singleton. */
9493 void dhd_dbg_remove(void)
9495 debugfs_remove(g_dbgfs.debugfs_mem);
9496 debugfs_remove(g_dbgfs.debugfs_dir);
9498 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
9501 #endif /* ifdef BCMDBGFS */
/*
 * WLMEDIA_HTSF TX path: stamp outgoing test-traffic packets with a host
 * timestamp record.  Only packets longer than HTSF_MINLEN whose destination
 * port falls in [tsport, tsport+20] are stamped (targets iperf/ICMP test
 * streams).  Fixed byte offsets (40, 44, 82, 84) index into the raw frame --
 * NOTE(review): these assume a specific IPv4/UDP layout; confirm.
 */
9506 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
9508 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
9509 struct sk_buff *skb;
9511 uint16 dport = 0, oldmagic = 0xACAC;
9515 /* timestamp packet */
9517 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
9519 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
9520 /* memcpy(&proto, p1+26, 4); */
9521 memcpy(&dport, p1+40, 2);
9522 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
9523 dport = ntoh16(dport);
9526 /* timestamp only if icmp or udb iperf with port 5555 */
9527 /* if (proto == 17 && dport == tsport) { */
9528 if (dport >= tsport && dport <= tsport + 20) {
9530 skb = (struct sk_buff *) pktbuf;
9532 htsf = dhd_get_htsf(dhd, 0);
9533 memset(skb->data + 44, 0, 2); /* clear checksum */
9534 memcpy(skb->data+82, &oldmagic, 2);
9535 memcpy(skb->data+84, &htsf, 4);
/* Full timestamp record written at HTSF_HOSTOFFSET. */
9537 memset(&ts, 0, sizeof(htsfts_t));
9538 ts.magic = HTSFMAGIC;
9539 ts.prio = PKTPRIO(pktbuf);
9540 ts.seqnum = htsf_seqnum++;
9541 ts.c10 = get_cycles();
9543 ts.endmagic = HTSFENDMAGIC;
9545 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
/*
 * Print a latency histogram: bins [0..NUMBIN-3] are counts, bin NUMBIN-2
 * holds the max observed value, bin NUMBIN-1 counts negative samples.
 */
9549 static void dhd_dump_htsfhisto(histo_t *his, char *s)
9551 int pktcnt = 0, curval = 0, i;
9552 for (i = 0; i < (NUMBIN-2); i++) {
9554 printf("%d ", his->bin[i]);
9555 pktcnt += his->bin[i];
9557 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
9558 his->bin[NUMBIN-1], s);
/*
 * Classify 'value' into a 500-unit-wide histogram bin; negatives go to the
 * last bin, overflow values to bin NUMBIN-3, and the running max is kept
 * in bin NUMBIN-2.
 */
9562 void sorttobin(int value, histo_t *histo)
9567 histo->bin[NUMBIN-1]++;
9570 if (value > histo->bin[NUMBIN-2]) /* store the max value */
9571 histo->bin[NUMBIN-2] = value;
9573 for (i = 0; i < (NUMBIN-2); i++) {
9574 binval += 500; /* 500m s bins */
9575 if (value <= binval) {
9580 histo->bin[NUMBIN-3]++;
/*
 * WLMEDIA_HTSF RX path: complete the timestamp record started on TX.  For
 * frames carrying the 0xACAC magic, stamps the receive-side host time,
 * computes the per-hop deltas (t2-t1, t3-t2, t4-t3) and end-to-end latency,
 * feeds them into the vi_d* histograms, and tracks the worst-case packet.
 * Fixed offsets (78, 80, 92) assume the same test-frame layout as the TX
 * stamping code -- NOTE(review): confirm against the full source.
 */
9584 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
9586 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9587 struct sk_buff *skb;
9590 int d1, d2, d3, end2end;
9594 skb = PKTTONATIVE(dhdp->osh, pktbuf);
9595 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
9597 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
9598 memcpy(&old_magic, p1+78, 2);
9599 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
9604 if (htsf_ts->magic == HTSFMAGIC) {
9605 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
9606 htsf_ts->cE0 = get_cycles();
9609 if (old_magic == 0xACAC) {
9612 htsf = dhd_get_htsf(dhd, 0);
9613 memcpy(skb->data+92, &htsf, sizeof(uint32));
9615 memcpy(&ts[tsidx].t1, skb->data+80, 16);
/* Per-stage latency deltas for this sample. */
9617 d1 = ts[tsidx].t2 - ts[tsidx].t1;
9618 d2 = ts[tsidx].t3 - ts[tsidx].t2;
9619 d3 = ts[tsidx].t4 - ts[tsidx].t3;
9620 end2end = ts[tsidx].t4 - ts[tsidx].t1;
9622 sorttobin(d1, &vi_d1);
9623 sorttobin(d2, &vi_d2);
9624 sorttobin(d3, &vi_d3);
9625 sorttobin(end2end, &vi_d4);
/* Remember the worst end-to-end sample seen so far. */
9627 if (end2end > 0 && end2end > maxdelay) {
9629 maxdelaypktno = tspktcnt;
9630 memcpy(&maxdelayts, &ts[tsidx], 16);
9632 if (++tsidx >= TSMAX)
/*
 * Extrapolate the current TSF from the last synchronized (tsf, cycle)
 * pair: converts the elapsed CPU cycle delta to microseconds using the
 * calibrated coefficient (coef.coefdec1coefdec2, fixed point via *10
 * scaling) and adds a fixed bus-propagation fudge (HTSF_BUS_DELAY).
 * Handles one cycle-counter wraparound.
 */
9637 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
9639 uint32 htsf = 0, cur_cycle, delta, delta_us;
9640 uint32 factor, baseval, baseval2;
9646 if (cur_cycle > dhd->htsf.last_cycle)
9647 delta = cur_cycle - dhd->htsf.last_cycle;
9649 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
9654 if (dhd->htsf.coef) {
9655 /* times ten to get the first digit */
9656 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
9657 baseval = (delta*10)/factor;
9658 baseval2 = (delta*10)/(factor+1);
9659 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
9660 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
9663 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
/*
 * Dump the recorded latency table: per-sample stage deltas, the index of
 * the slowest sample, and the all-time worst packet's breakdown.
 */
9669 static void dhd_dump_latency(void)
9672 int d1, d2, d3, d4, d5;
9674 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
9675 for (i = 0; i < TSMAX; i++) {
9676 d1 = ts[i].t2 - ts[i].t1;
9677 d2 = ts[i].t3 - ts[i].t2;
9678 d3 = ts[i].t4 - ts[i].t3;
9679 d4 = ts[i].t4 - ts[i].t1;
/* d5 tracks the best (largest t4-t1) seen so far at index 'max'. */
9680 d5 = ts[max].t4-ts[max].t1;
9681 if (d4 > d5 && d4 > 0) {
9684 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
9685 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
9689 printf("current idx = %d \n", tsidx);
9691 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
9692 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
9693 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
9694 maxdelayts.t2 - maxdelayts.t1,
9695 maxdelayts.t3 - maxdelayts.t2,
9696 maxdelayts.t4 - maxdelayts.t3,
9697 maxdelayts.t4 - maxdelayts.t1);
/*
 * Query the dongle "tsf" iovar and print it next to the host-extrapolated
 * value (dhd_get_htsf) sampled just before and after the ioctl, so the
 * calibration error is visible.
 */
9702 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
9714 memset(&ioc, 0, sizeof(ioc));
9715 memset(&tsf_buf, 0, sizeof(tsf_buf));
9717 ioc.cmd = WLC_GET_VAR;
9719 ioc.len = (uint)sizeof(buf);
9722 strncpy(buf, "tsf", sizeof(buf) - 1);
9723 buf[sizeof(buf) - 1] = '\0';
9724 s1 = dhd_get_htsf(dhd, 0);
9725 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
9727 DHD_ERROR(("%s: tsf is not supported by device\n",
9728 dhd_ifname(&dhd->pub, ifidx)));
9733 s2 = dhd_get_htsf(dhd, 0);
9735 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
9736 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
9737 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
9738 dhd->htsf.coefdec2, s2-tsf_buf.low);
9739 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
/*
 * Recalibrate the cycles-per-TSF-tick coefficient from a fresh TSF sample
 * ('data' points at a tsf_t).  Computes cyc_delta / tsf_delta with three
 * decimal digits (dec1..dec3) via repeated *10 fixed-point division, then
 * stores the new reference pair (last_cycle, last_tsf) and coefficients.
 * Skips the update on a zero TSF or an un-reconcilable wraparound.
 */
9743 void htsf_update(dhd_info_t *dhd, void *data)
9745 static ulong cur_cycle = 0, prev_cycle = 0;
9746 uint32 htsf, tsf_delta = 0;
9747 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
9751 /* cycles_t in inlcude/mips/timex.h */
9755 prev_cycle = cur_cycle;
9758 if (cur_cycle > prev_cycle)
9759 cyc_delta = cur_cycle - prev_cycle;
/* Cycle counter wrapped: unwrap across the 32-bit boundary. */
9763 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
9767 printf(" tsf update ata point er is null \n");
9769 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
9770 memcpy(&cur_tsf, data, sizeof(tsf_t));
9772 if (cur_tsf.low == 0) {
9773 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
9777 if (cur_tsf.low > prev_tsf.low)
9778 tsf_delta = (cur_tsf.low - prev_tsf.low);
9780 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
9781 cur_tsf.low, prev_tsf.low));
9782 if (cur_tsf.high > prev_tsf.high) {
9783 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
9784 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
9787 return; /* do not update */
/* Integer part plus three fixed-point decimal digits of the ratio. */
9791 hfactor = cyc_delta / tsf_delta;
9792 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
9793 dec1 = tmp/tsf_delta;
9794 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
9795 tmp = (tmp - (dec1*tsf_delta))*10;
9796 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
9815 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
9816 dhd->htsf.coef = hfactor;
9817 dhd->htsf.last_cycle = cur_cycle;
9818 dhd->htsf.last_tsf = cur_tsf.low;
9819 dhd->htsf.coefdec1 = dec1;
9820 dhd->htsf.coefdec2 = dec2;
9823 htsf = prev_tsf.low;
9827 #endif /* WLMEDIA_HTSF */
9829 #ifdef CUSTOM_SET_CPUCORE
/*
 * Pin (set!=0) or unpin the DPC and RXF worker threads to dedicated CPU
 * cores for VHT80 throughput.  Bails early unless the channel is VHT80.
 * Each set_cpus_allowed_ptr is retried up to MAX_RETRY_SET_CPUCORE times
 * before giving up with an error log.
 */
9830 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
9832 int e_dpc = 0, e_rxf = 0, retry_set = 0;
9834 if (!(dhd->chan_isvht80)) {
9835 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
/* Pin/unpin the DPC thread, with bounded retries. */
9842 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9843 cpumask_of(DPC_CPUCORE));
9845 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9846 cpumask_of(PRIMARY_CPUCORE));
9848 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9849 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
9854 } while (e_dpc < 0);
/* Same treatment for the RX-frame thread. */
9859 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9860 cpumask_of(RXF_CPUCORE));
9862 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9863 cpumask_of(PRIMARY_CPUCORE));
9865 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9866 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
9871 } while (e_rxf < 0);
9873 #ifdef DHD_OF_SUPPORT
9874 interrupt_set_cpucore(set);
9875 #endif /* DHD_OF_SUPPORT */
9876 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
9880 #endif /* CUSTOM_SET_CPUCORE */
9881 #if defined(DHD_TCP_WINSIZE_ADJUST)
/* True if 'port' is one of the configured target ports. */
9882 static int dhd_port_list_match(int port)
9885 for (i = 0; i < MAX_TARGET_PORTS; i++) {
9886 if (target_ports[i] == port)
/*
 * Hostapd-mode hack: inflate an undersized advertised TCP window (by
 * WIN_SIZE_SCALE_FACTOR) on packets to selected ports, patching the TCP
 * checksum incrementally instead of recomputing it.
 * NOTE(review): the incremental checksum math here is a simplified
 * RFC-1624-style update on the raw 16-bit field; verify the carry handling
 * against the full source before reuse.
 */
9891 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
9893 struct iphdr *ipheader;
9894 struct tcphdr *tcpheader;
9896 int32 incremental_checksum;
9898 if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
9900 if (skb == NULL || skb->data == NULL)
9903 ipheader = (struct iphdr*)(skb->data);
9905 if (ipheader->protocol == IPPROTO_TCP) {
/* Advance past the variable-length IP header to the TCP header. */
9906 tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
9908 win_size = ntoh16(tcpheader->window);
9909 if (win_size < MIN_TCP_WIN_SIZE &&
9910 dhd_port_list_match(ntoh16(tcpheader->dest))) {
9911 incremental_checksum = ntoh16(tcpheader->check);
9912 incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
9913 if (incremental_checksum < 0)
9914 --incremental_checksum;
9915 tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
9916 tcpheader->check = hton16((unsigned short)incremental_checksum);
/* Restore skb->data to the IP header for downstream consumers. */
9919 skb_push(skb, (ipheader->ihl)<<2);
9922 #endif /* DHD_TCP_WINSIZE_ADJUST */
9924 /* Get interface specific ap_isolate configuration */
/* Read the per-interface AP client-isolation setting. */
9925 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
9927 dhd_info_t *dhd = dhdp->info;
9930 ASSERT(idx < DHD_MAX_IFS);
9932 ifp = dhd->iflist[idx];
9934 return ifp->ap_isolate;
9937 /* Set interface specific ap_isolate configuration */
/* Write the per-interface AP client-isolation setting. */
9938 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
9940 dhd_info_t *dhd = dhdp->info;
9943 ASSERT(idx < DHD_MAX_IFS);
9945 ifp = dhd->iflist[idx];
9947 ifp->ap_isolate = val;
9953 /* Returns interface specific WMF configuration */
/* Wireless multicast forwarding state for interface 'idx'. */
9954 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
9956 dhd_info_t *dhd = dhdp->info;
9959 ASSERT(idx < DHD_MAX_IFS);
9961 ifp = dhd->iflist[idx];
9964 #endif /* DHD_WMF */
9967 #ifdef DHD_UNICAST_DHCP
/*
 * Locate the L3 payload of an Ethernet frame, handling Ethernet II,
 * SNAP-encapsulated 802.3, and one optional 802.1Q VLAN tag.
 * Outputs: *data_ptr/*len_ptr = payload after the type field, *et_ptr =
 * ethertype, *snap_ptr = whether SNAP encapsulation was seen.
 * Returns nonzero (error) for short or non-SNAP 802.3 frames.
 */
9969 dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
9970 uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
9972 uint8 *frame = PKTDATA(pub->osh, pktbuf);
9973 int length = PKTLEN(pub->osh, pktbuf);
9974 uint8 *pt; /* Pointer to type field */
9977 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
9978 if (length < ETHER_HDR_LEN) {
9979 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
9980 __FUNCTION__, length));
9982 } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
9983 /* Frame is Ethernet II */
9984 pt = frame + ETHER_TYPE_OFFSET;
9985 } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
9986 !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
9987 pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
9990 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
9995 ethertype = ntoh16_ua(pt);
9997 /* Skip VLAN tag, if any */
9998 if (ethertype == ETHER_TYPE_8021Q) {
/* Bounds-check before reading the inner ethertype behind the tag. */
10001 if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
10002 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
10003 __FUNCTION__, length));
10007 ethertype = ntoh16_ua(pt);
10010 *data_ptr = pt + ETHER_TYPE_LEN;
10011 *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
10012 *et_ptr = ethertype;
/*
 * Locate the transport payload of an IPv4 packet inside 'pktbuf'.
 * Builds on dhd_get_pkt_ether_type, then validates: IPv4 only, sane header
 * length, total length consistent with the frame, and not fragmented.
 * Outputs: *data_ptr/*len_ptr = payload past the IP header, *prot_ptr =
 * IP protocol number.  Nonzero return on any validation failure.
 */
10018 dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
10019 uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
10021 struct ipv4_hdr *iph; /* IP frame pointer */
10022 int iplen; /* IP frame length */
10023 uint16 ethertype, iphdrlen, ippktlen;
10028 if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
10029 &iplen, &ethertype, &snap) != 0)
10032 if (ethertype != ETHER_TYPE_IP) {
10036 /* We support IPv4 only */
10037 if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
10041 /* Header length sanity */
10042 iphdrlen = IPV4_HLEN(iph);
10045 * Packet length sanity; sometimes we receive eth-frame size bigger
10046 * than the IP content, which results in a bad tcp chksum
10048 ippktlen = ntoh16(iph->tot_len);
10049 if (ippktlen < iplen) {
/* Ethernet padding beyond the IP total length: trim, don't reject. */
10051 DHD_INFO(("%s: extra frame length ignored\n",
10054 } else if (ippktlen > iplen) {
10055 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
10056 __FUNCTION__, ippktlen - iplen));
10060 if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
10061 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
10062 __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
10067 * We don't handle fragmented IP packets. A first frag is indicated by the MF
10068 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
10070 iph_frag = ntoh16(iph->frag);
10072 if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
10073 DHD_INFO(("DHD:%s: IP fragment not handled\n",
10078 prot = IPV4_PROT(iph);
10080 *data_ptr = (((uint8 *)iph) + iphdrlen);
10081 *len_ptr = iplen - iphdrlen;
10086 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
/*
 * For broadcast/multicast DHCP server->client replies (offer/ack) destined
 * to a known associated station, rewrite the Ethernet destination to that
 * station's MAC (taken from the DHCP chaddr field) so the AP delivers it
 * unicast.  Each check short-circuits: non-multicast, non-UDP, wrong port,
 * short frames, and non-reply DHCP types are all left untouched.
 */
10088 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
10090 dhd_sta_t* stainfo;
10091 uint8 *eh = PKTDATA(pub->osh, pktbuf);
10100 if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
10102 if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
10104 if (prot != IP_PROT_UDP)
10106 /* check frame length, at least UDP_HDR_LEN */
10107 if (udpl < UDP_HDR_LEN) {
10108 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
10112 port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
10113 /* only process DHCP packets from server to client */
10114 if (port != DHCP_PORT_CLIENT)
10117 dhcp = udph + UDP_HDR_LEN;
10118 dhcpl = udpl - UDP_HDR_LEN;
10120 if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
10121 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
10125 /* only process DHCP reply(offer/ack) packets */
10126 if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
10128 chaddr = dhcp + DHCP_CHADDR_OFFSET;
/* Only rewrite when the client MAC belongs to an associated station. */
10129 stainfo = dhd_find_sta(pub, ifidx, chaddr);
10131 bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
10136 #endif /* DHD_UNICAST_DHD */
10137 #ifdef DHD_L2_FILTER
10138 /* Check if packet type is ICMP ECHO */
/*
 * L2 filter predicate: detect ICMP echo requests (pings) so they can be
 * blocked.  Parses the packet down to the IP payload and checks protocol
 * and ICMP type.
 */
10140 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
10142 struct bcmicmp_hdr *icmph;
10146 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
10148 if (prot == IP_PROT_ICMP) {
10149 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
10154 #endif /* DHD_L2_FILTER */
10156 #ifdef SET_RPS_CPUS
/*
 * Install a Receive Packet Steering CPU map on an RX queue from a textual
 * CPU bitmap in 'buf' (same format as /sys/class/net/.../rps_cpus).
 * Parses the mask, builds an rps_map, swaps it in under rps_map_lock via
 * RCU, and bumps/drops the rps_needed static key to match.  The old map is
 * freed with kfree_rcu after readers drain.
 */
10157 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
10159 struct rps_map *old_map, *map;
10160 cpumask_var_t mask;
10162 static DEFINE_SPINLOCK(rps_map_lock);
10164 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10166 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
10167 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
10171 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
10173 free_cpumask_var(mask);
10174 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
/* Allocate at least a cache line, per the kernel's own rps code. */
10178 map = kzalloc(max_t(unsigned int,
10179 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
10182 free_cpumask_var(mask);
10183 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
10188 for_each_cpu(cpu, mask)
10189 map->cpus[i++] = cpu;
10195 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
/* Publish the new map; readers see old or new, never a torn state. */
10199 spin_lock(&rps_map_lock);
10200 old_map = rcu_dereference_protected(queue->rps_map,
10201 lockdep_is_held(&rps_map_lock));
10202 rcu_assign_pointer(queue->rps_map, map);
10203 spin_unlock(&rps_map_lock);
10206 static_key_slow_inc(&rps_needed);
10208 kfree_rcu(old_map, rcu);
10209 static_key_slow_dec(&rps_needed);
10211 free_cpumask_var(mask);
10213 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
/* Remove any installed RPS map from the queue (RCU-deferred free). */
10217 void custom_rps_map_clear(struct netdev_rx_queue *queue)
10219 struct rps_map *map;
10221 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10223 map = rcu_dereference_protected(queue->rps_map, 1);
10225 RCU_INIT_POINTER(queue->rps_map, NULL);
10226 kfree_rcu(map, rcu);
10227 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
10230 #endif /* SET_RPS_CPUS */
10232 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
/*
 * WLANAUDIO shared-memory audio (SDA) export API for a customer platform.
 * These entry points are EXPORT_SYMBOLed for an external audio module and
 * operate on the global 'dhd_global' instance.  Several bodies are mostly
 * elided in this snippet; comments below describe only what is visible.
 */
/* Register the TX shared-memory region; records the per-packet size. */
10234 SDA_setSharedMemory4Send(unsigned int buffer_id,
10235 unsigned char *buffer, unsigned int buffer_size,
10236 unsigned int packet_size, unsigned int headroom_size)
10238 dhd_info_t *dhd = dhd_global;
10240 sda_packet_length = packet_size;
/* Register the send-done callback from the audio module. */
10248 SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
10250 dhd_info_t *dhd = dhd_global;
/*
 * Return the 64-bit BSS TSF for virtual interface 0 (wlan0) or 1 (p2p0)
 * via the "tsf_bss" iovar; 0 on ioctl failure path (see full source).
 */
10259 SDA_getTsf(unsigned char vif_id)
10261 dhd_info_t *dhd = dhd_global;
10263 char buf[WLC_IOCTL_SMLEN];
10271 memset(buf, 0, sizeof(buf));
10273 if (vif_id == 0) /* wlan0 tsf */
10274 ifidx = dhd_ifname2idx(dhd, "wlan0");
10275 else if (vif_id == 1) /* p2p0 tsf */
10276 ifidx = dhd_ifname2idx(dhd, "p2p0");
10278 bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));
10280 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
10281 DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
10285 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
10286 tsf_val = (uint64)tsf_buf.high;
10287 DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
10288 __FUNCTION__, tsf_buf.high, tsf_buf.low));
10290 return ((tsf_val << 32) | tsf_buf.low);
10292 EXPORT_SYMBOL(SDA_getTsf);
/* Push a "wa_tsf_sync" iovar to the dongle (TSF synchronization). */
10297 dhd_info_t *dhd = dhd_global;
10299 char iovbuf[WLC_IOCTL_SMLEN];
10301 bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
10302 dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
10304 DHD_TRACE(("%s\n", __FUNCTION__));
10308 extern struct net_device *wl0dot1_dev;
/*
 * Hot-path TX: copy one audio packet out of shared memory into a fresh
 * skb and transmit it on wl0.1 at voice priority.  Packets whose
 * destination is on the wlanaudio blacklist are dropped (sampled check).
 */
10311 BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
10313 struct sk_buff *skb;
10314 sda_packet_t *shm_packet = packet;
10315 dhd_info_t *dhd = dhd_global;
10318 static unsigned int cnt_t = 1;
10324 if (dhd->is_wlanaudio_blist) {
10325 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
10326 if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
10327 if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
10328 shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
10334 if ((cnt_t % 10000) == 0)
10339 /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
10341 skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
10343 skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
10344 skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
10345 skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
10348 skb->dev = wl0dot1_dev;
10349 shm_packet->txTsf = 0x0;
10350 shm_packet->rxTsf = 0x0;
10351 memcpy(skb->data, &shm_packet->headroom,
10352 sda_packet_length - OFFSETOF(sda_packet_t, headroom));
/* Hand the shared-memory slot back to the producer before xmit. */
10353 shm_packet->desc.ready_to_copy = 0;
10355 dhd_start_xmit(skb, skb->dev);
/* RX-side registration hooks; bodies elided in this snippet. */
10359 SDA_registerCallback4Recv(unsigned char *pBufferTotal,
10360 unsigned int BufferTotalSize)
10362 dhd_info_t *dhd = dhd_global;
10371 SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
10372 unsigned int BufferTotalSize,
10373 unsigned int BufferUnitSize,
10374 unsigned int Headroomsize)
10376 dhd_info_t *dhd = dhd_global;
10385 SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
10387 dhd_info_t *dhd = dhd_global;
10394 EXPORT_SYMBOL(SDA_setSharedMemory4Send);
10395 EXPORT_SYMBOL(SDA_registerCallback4SendDone);
10396 EXPORT_SYMBOL(SDA_syncTsf);
10397 EXPORT_SYMBOL(SDA_function4Send);
10398 EXPORT_SYMBOL(SDA_registerCallback4Recv);
10399 EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
10400 EXPORT_SYMBOL(SDA_function4RecvDone);
10402 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/* Return the dhd_pub_t embedded in a DHD net_device's private data. */
10404 void *dhd_get_pub(struct net_device *dev)
10406 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
10407 return (void *)&dhdinfo->pub;