2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * $Copyright Open Broadcom Corporation$
7 * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
14 #include <linux/syscalls.h>
15 #include <event_log.h>
16 #endif /* SHOW_LOGTRACE */
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/random.h>
28 #include <linux/spinlock.h>
29 #include <linux/ethtool.h>
30 #include <linux/fcntl.h>
33 #include <linux/reboot.h>
34 #include <linux/notifier.h>
35 #include <net/addrconf.h>
36 #ifdef ENABLE_ADAPTIVE_SCHED
37 #include <linux/cpufreq.h>
38 #endif /* ENABLE_ADAPTIVE_SCHED */
40 #include <asm/uaccess.h>
41 #include <asm/unaligned.h>
45 #include <bcmendian.h>
48 #include <proto/ethernet.h>
49 #include <proto/bcmevent.h>
50 #include <proto/vlan.h>
52 #include <proto/bcmicmp.h>
54 #include <proto/802.3.h>
56 #include <dngl_stats.h>
57 #include <dhd_linux_wq.h>
59 #include <dhd_linux.h>
60 #ifdef PCIE_FULL_DONGLE
61 #include <dhd_flowring.h>
64 #include <dhd_proto.h>
65 #include <dhd_config.h>
67 #ifdef CONFIG_HAS_WAKELOCK
68 #include <linux/wakelock.h>
71 #include <wl_cfg80211.h>
74 #include <wl_cfgp2p.h>
80 #include <proto/802.11_bta.h>
81 #include <proto/bt_amp_hci.h>
86 #include <linux/compat.h>
90 #include <dhd_wmf_linux.h>
93 #ifdef AMPDU_VO_ENABLE
94 #include <proto/802.1d.h>
95 #endif /* AMPDU_VO_ENABLE */
96 #ifdef DHDTCPACK_SUPPRESS
98 #endif /* DHDTCPACK_SUPPRESS */
100 #if defined(DHD_TCP_WINSIZE_ADJUST)
101 #include <linux/tcp.h>
103 #endif /* DHD_TCP_WINSIZE_ADJUST */
106 #include <linux/time.h>
109 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
110 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
111 #define TSMAX 1000 /* max no. of timing record kept */
114 static uint32 tsidx = 0;
115 static uint32 htsf_seqnum = 0;
117 struct timeval tsync;
118 static uint32 tsport = 5010;
120 typedef struct histo_ {
124 #if !ISPOWEROF2(DHD_SDALIGN)
125 #error DHD_SDALIGN is not a power of 2!
128 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
129 #endif /* WLMEDIA_HTSF */
131 #if defined(DHD_TCP_WINSIZE_ADJUST)
132 #define MIN_TCP_WIN_SIZE 18000
133 #define WIN_SIZE_SCALE_FACTOR 2
134 #define MAX_TARGET_PORTS 5
136 static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
137 static uint dhd_use_tcp_window_size_adjust = FALSE;
138 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
139 #endif /* DHD_TCP_WINSIZE_ADJUST */
143 extern bool ap_cfg_running;
144 extern bool ap_fw_loaded;
148 #ifdef ENABLE_ADAPTIVE_SCHED
149 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
150 #ifndef CUSTOM_CPUFREQ_THRESH
151 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
152 #endif /* CUSTOM_CPUFREQ_THRESH */
153 #endif /* ENABLE_ADAPTIVE_SCHED */
155 /* enable HOSTIP cache update from the host side when an eth0:N is up */
156 #define AOE_IP_ALIAS_SUPPORT 1
160 #include <bcm_rpc_tp.h>
163 #include <wlfc_proto.h>
164 #include <dhd_wlfc.h>
167 #include <wl_android.h>
169 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
171 #endif /* CUSTOMER_HW20 && WLANAUDIO */
173 /* Maximum STA per radio */
174 #define DHD_MAX_STA 32
177 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
178 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
179 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
181 #ifdef ARP_OFFLOAD_SUPPORT
182 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
183 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
184 unsigned long event, void *ptr);
185 static struct notifier_block dhd_inetaddr_notifier = {
186 .notifier_call = dhd_inetaddr_notifier_call
188 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
189 * created in kernel notifier link list (with 'next' pointing to itself)
191 static bool dhd_inetaddr_notifier_registered = FALSE;
192 #endif /* ARP_OFFLOAD_SUPPORT */
195 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
196 unsigned long event, void *ptr);
197 static struct notifier_block dhd_inet6addr_notifier = {
198 .notifier_call = dhd_inet6addr_notifier_call
200 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
201 * created in kernel notifier link list (with 'next' pointing to itself)
203 static bool dhd_inet6addr_notifier_registered = FALSE;
206 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
207 #include <linux/suspend.h>
208 volatile bool dhd_mmc_suspend = FALSE;
209 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
210 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
212 #if defined(OOB_INTR_ONLY)
213 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
215 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
216 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
218 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
219 MODULE_LICENSE("GPL v2");
220 #endif /* LinuxVer */
225 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
227 #ifndef PROP_TXSTATUS
228 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
230 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
232 #endif /* BCM_FD_AGGR */
235 extern bool dhd_wlfc_skip_fc(void);
236 extern void dhd_wlfc_plat_init(void *dhd);
237 extern void dhd_wlfc_plat_deinit(void *dhd);
238 #endif /* PROP_TXSTATUS */
240 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
246 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
248 /* Linux wireless extension support */
249 #if defined(WL_WIRELESS_EXT)
251 extern wl_iw_extra_params_t g_wl_iw_params;
252 #endif /* defined(WL_WIRELESS_EXT) */
254 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
255 #include <linux/earlysuspend.h>
256 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
258 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
260 #ifdef PKT_FILTER_SUPPORT
261 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
262 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
263 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
268 extern int dhd_read_macaddr(struct dhd_info *dhd);
/* No platform-specific MAC provisioning hook compiled in: succeed without
 * touching the driver state. */
static inline int
dhd_read_macaddr(struct dhd_info *dhd)
{
	(void)dhd;	/* parameter kept for interface compatibility */
	return 0;
}
273 extern int dhd_write_macaddr(struct ether_addr *mac);
/* No platform-specific MAC persistence hook compiled in: succeed without
 * writing anything. */
static inline int
dhd_write_macaddr(struct ether_addr *mac)
{
	(void)mac;	/* parameter kept for interface compatibility */
	return 0;
}
279 #if defined(SOFTAP_TPUT_ENHANCE)
280 extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
281 extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
282 #endif /* SOFTAP_TPUT_ENHANCE */
286 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
287 void custom_rps_map_clear(struct netdev_rx_queue *queue);
288 #ifdef CONFIG_MACH_UNIVERSAL5433
289 #define RPS_CPUS_MASK "10"
291 #define RPS_CPUS_MASK "6"
292 #endif /* CONFIG_MACH_UNIVERSAL5433 */
293 #endif /* SET_RPS_CPUS */
295 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
296 static struct notifier_block dhd_reboot_notifier = {
297 .notifier_call = dhd_reboot_callback,
302 typedef struct dhd_if_event {
303 struct list_head list;
304 wl_event_data_if_t event;
305 char name[IFNAMSIZ+1];
306 uint8 mac[ETHER_ADDR_LEN];
309 /* Interface control information */
310 typedef struct dhd_if {
311 struct dhd_info *info; /* back pointer to dhd_info */
312 /* OS/stack specifics */
313 struct net_device *net;
314 int idx; /* iface idx in dongle */
315 uint subunit; /* subunit */
316 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
319 uint8 bssidx; /* bsscfg index for the interface */
320 bool attached; /* Delayed attachment when unset */
321 bool txflowcontrol; /* Per interface flow control indicator */
322 char name[IFNAMSIZ+1]; /* linux interface name */
323 struct net_device_stats stats;
325 dhd_wmf_t wmf; /* per bsscfg wmf setting */
327 #ifdef PCIE_FULL_DONGLE
328 struct list_head sta_list; /* sll of associated stations */
329 #if !defined(BCM_GMAC3)
330 spinlock_t sta_list_lock; /* lock for manipulating sll */
331 #endif /* ! BCM_GMAC3 */
332 #endif /* PCIE_FULL_DONGLE */
333 uint32 ap_isolate; /* ap-isolation settings */
346 uint32 coef; /* scaling factor */
347 uint32 coefdec1; /* first decimal */
348 uint32 coefdec2; /* second decimal */
358 static tstamp_t ts[TSMAX];
359 static tstamp_t maxdelayts;
360 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
362 #endif /* WLMEDIA_HTSF */
364 struct ipv6_work_info_t {
370 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
371 #define MAX_WLANAUDIO_BLACKLIST 4
373 struct wlanaudio_blacklist {
376 ulong txfail_jiffies;
377 struct ether_addr blacklist_addr;
379 #endif /* CUSTOMER_HW20 && WLANAUDIO */
381 /* When Perimeter locks are deployed, any blocking calls must be preceded
382 * with a PERIM UNLOCK and followed by a PERIM LOCK.
383 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
384 * wait_event_timeout().
387 /* Local private structure (extension of pub) */
388 typedef struct dhd_info {
389 #if defined(WL_WIRELESS_EXT)
390 wl_iw_t iw; /* wireless extensions state (must be first) */
391 #endif /* defined(WL_WIRELESS_EXT) */
393 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
395 void *adapter; /* adapter information, interrupt, fw path etc. */
396 char fw_path[PATH_MAX]; /* path to firmware image */
397 char nv_path[PATH_MAX]; /* path to nvram vars file */
398 char conf_path[PATH_MAX]; /* path to config vars file */
400 struct semaphore proto_sem;
402 spinlock_t wlfc_spinlock;
404 #endif /* PROP_TXSTATUS */
408 wait_queue_head_t ioctl_resp_wait;
409 uint32 default_wd_interval;
411 struct timer_list timer;
413 struct tasklet_struct tasklet;
418 struct semaphore sdsem;
419 tsk_ctl_t thr_dpc_ctl;
420 tsk_ctl_t thr_wdt_ctl;
422 tsk_ctl_t thr_rxf_ctl;
424 bool rxthread_enabled;
427 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
428 struct wake_lock wl_wifi; /* Wifi wakelock */
429 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
430 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
431 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
432 #ifdef BCMPCIE_OOB_HOST_WAKE
433 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
434 #endif /* BCMPCIE_OOB_HOST_WAKE */
435 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
437 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
438 /* net_device interface lock, prevent race conditions among net_dev interface
439 * calls and wifi_on or wifi_off
441 struct mutex dhd_net_if_mutex;
442 struct mutex dhd_suspend_mutex;
444 spinlock_t wakelock_spinlock;
445 uint32 wakelock_counter;
446 int wakelock_wd_counter;
447 int wakelock_rx_timeout_enable;
448 int wakelock_ctrl_timeout_enable;
450 uint32 wakelock_before_waive;
452 /* Thread to issue ioctl for multicast */
453 wait_queue_head_t ctrl_wait;
454 atomic_t pend_8021x_cnt;
455 dhd_attach_states_t dhd_state;
457 dhd_event_log_t event_data;
458 #endif /* SHOW_LOGTRACE */
460 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
461 struct early_suspend early_suspend;
462 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
464 #ifdef ARP_OFFLOAD_SUPPORT
466 #endif /* ARP_OFFLOAD_SUPPORT */
470 struct timer_list rpcth_timer;
471 bool rpcth_timer_active;
474 #ifdef DHDTCPACK_SUPPRESS
475 spinlock_t tcpack_lock;
476 #endif /* DHDTCPACK_SUPPRESS */
477 void *dhd_deferred_wq;
478 #ifdef DEBUG_CPU_FREQ
479 struct notifier_block freq_trans;
480 int __percpu *new_freq;
483 struct notifier_block pm_notifier;
484 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
485 struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
486 bool is_wlanaudio_blist;
487 #endif /* CUSTOMER_HW20 && WLANAUDIO */
490 #define DHDIF_FWDER(dhdif) FALSE
492 /* Flag to indicate if we should download firmware on driver load */
493 uint dhd_download_fw_on_driverload = TRUE;
495 /* Definitions to provide path to the firmware and nvram
496 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
498 char firmware_path[MOD_PARAM_PATHLEN];
499 char nvram_path[MOD_PARAM_PATHLEN];
500 char config_path[MOD_PARAM_PATHLEN];
502 /* backup buffer for firmware and nvram path */
503 char fw_bak_path[MOD_PARAM_PATHLEN];
504 char nv_bak_path[MOD_PARAM_PATHLEN];
506 /* information string to keep firmware, chip, and chip-revision version info visible in the log */
507 char info_string[MOD_PARAM_INFOLEN];
508 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
510 int disable_proptx = 0;
511 module_param(op_mode, int, 0644);
512 extern int wl_control_wl_start(struct net_device *dev);
513 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
514 struct semaphore dhd_registration_sem;
515 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
517 /* deferred handlers */
518 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
519 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
520 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
521 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
523 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
527 extern void dhd_netdev_free(struct net_device *ndev);
528 #endif /* WL_CFG80211 */
531 module_param(dhd_msg_level, int, 0);
532 #if defined(WL_WIRELESS_EXT)
533 module_param(iw_msg_level, int, 0);
536 module_param(wl_dbg_level, int, 0);
538 module_param(android_msg_level, int, 0);
539 module_param(config_msg_level, int, 0);
541 #ifdef ARP_OFFLOAD_SUPPORT
542 /* ARP offload enable */
543 uint dhd_arp_enable = TRUE;
544 module_param(dhd_arp_enable, uint, 0);
546 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
548 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
550 module_param(dhd_arp_mode, uint, 0);
551 #endif /* ARP_OFFLOAD_SUPPORT */
553 /* Disable Prop tx */
554 module_param(disable_proptx, int, 0644);
555 /* load firmware and/or nvram values from the filesystem */
556 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
557 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
558 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
560 /* Watchdog interval */
562 /* extend watchdog expiration to 2 seconds when DPC is running */
563 #define WATCHDOG_EXTEND_INTERVAL (2000)
565 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
566 module_param(dhd_watchdog_ms, uint, 0);
568 #if defined(DHD_DEBUG)
569 /* Console poll interval */
570 uint dhd_console_ms = 0;
571 module_param(dhd_console_ms, uint, 0644);
572 #endif /* defined(DHD_DEBUG) */
575 uint dhd_slpauto = TRUE;
576 module_param(dhd_slpauto, uint, 0);
578 #ifdef PKT_FILTER_SUPPORT
579 /* Global Pkt filter enable control */
580 uint dhd_pkt_filter_enable = TRUE;
581 module_param(dhd_pkt_filter_enable, uint, 0);
584 /* Pkt filter init setup */
585 uint dhd_pkt_filter_init = 0;
586 module_param(dhd_pkt_filter_init, uint, 0);
588 /* Pkt filter mode control */
589 uint dhd_master_mode = FALSE;
590 module_param(dhd_master_mode, uint, 0);
592 int dhd_watchdog_prio = 0;
593 module_param(dhd_watchdog_prio, int, 0);
595 /* DPC thread priority */
596 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
597 module_param(dhd_dpc_prio, int, 0);
599 /* RX frame thread priority */
600 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
601 module_param(dhd_rxf_prio, int, 0);
603 int passive_channel_skip = 0;
604 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
606 #if !defined(BCMDHDUSB)
607 extern int dhd_dongle_ramsize;
608 module_param(dhd_dongle_ramsize, int, 0);
609 #endif /* BCMDHDUSB */
611 /* Keep track of number of instances */
612 static int dhd_found = 0;
613 static int instance_base = 0; /* Starting instance number */
614 module_param(instance_base, int, 0644);
616 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
617 dhd_info_t *dhd_global = NULL;
618 #endif /* CUSTOMER_HW20 && WLANAUDIO */
622 /* DHD Perimeter lock only used in router with bypass forwarding. */
623 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
624 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
625 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
626 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
627 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
629 #ifdef PCIE_FULL_DONGLE
630 #if defined(BCM_GMAC3)
631 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
632 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
633 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
634 #else /* ! BCM_GMAC3 */
635 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
636 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
637 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
638 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
639 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
640 #endif /* ! BCM_GMAC3 */
641 #endif /* PCIE_FULL_DONGLE */
643 /* Control fw roaming */
645 uint dhd_roam_disable = 0;
647 uint dhd_roam_disable = 0;
650 /* Control radio state */
651 uint dhd_radio_up = 1;
653 /* Network interface name */
654 char iface_name[IFNAMSIZ] = {'\0'};
655 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
657 /* The following are specific to the SDIO dongle */
659 /* IOCTL response timeout */
660 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
662 /* Idle timeout for backplane clock */
663 int dhd_idletime = DHD_IDLETIME_TICKS;
664 module_param(dhd_idletime, int, 0);
667 uint dhd_poll = FALSE;
668 module_param(dhd_poll, uint, 0);
671 uint dhd_intr = TRUE;
672 module_param(dhd_intr, uint, 0);
674 /* SDIO Drive Strength (in milliamps) */
675 uint dhd_sdiod_drive_strength = 6;
676 module_param(dhd_sdiod_drive_strength, uint, 0);
680 extern uint dhd_txbound;
681 extern uint dhd_rxbound;
682 module_param(dhd_txbound, uint, 0);
683 module_param(dhd_rxbound, uint, 0);
685 /* Deferred transmits */
686 extern uint dhd_deferred_tx;
687 module_param(dhd_deferred_tx, uint, 0);
690 extern void dhd_dbg_init(dhd_pub_t *dhdp);
691 extern void dhd_dbg_remove(void);
692 #endif /* BCMDBGFS */
698 /* Echo packet generator (pkts/s) */
700 module_param(dhd_pktgen, uint, 0);
702 /* Echo packet len (0 => sawtooth, max 2040) */
703 uint dhd_pktgen_len = 0;
704 module_param(dhd_pktgen_len, uint, 0);
707 #if defined(BCMSUP_4WAY_HANDSHAKE)
708 /* Use in dongle supplicant for 4-way handshake */
709 uint dhd_use_idsup = 0;
710 module_param(dhd_use_idsup, uint, 0);
711 #endif /* BCMSUP_4WAY_HANDSHAKE */
713 extern char dhd_version[];
715 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
716 static void dhd_net_if_lock_local(dhd_info_t *dhd);
717 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
718 static void dhd_suspend_lock(dhd_pub_t *dhdp);
719 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
722 void htsf_update(dhd_info_t *dhd, void *data);
723 tsf_t prev_tsf, cur_tsf;
725 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
726 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
727 static void dhd_dump_latency(void);
728 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
729 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
730 static void dhd_dump_htsfhisto(histo_t *his, char *s);
731 #endif /* WLMEDIA_HTSF */
733 /* Monitor interface */
734 int dhd_monitor_init(void *dhd_pub);
735 int dhd_monitor_uninit(void);
738 #if defined(WL_WIRELESS_EXT)
739 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
740 #endif /* defined(WL_WIRELESS_EXT) */
742 static void dhd_dpc(ulong data);
744 extern int dhd_wait_pend8021x(struct net_device *dev);
745 void dhd_os_wd_timer_extend(void *bus, bool extend);
749 #error TOE requires BDC
751 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
752 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
755 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
756 wl_event_msg_t *event_ptr, void **data_ptr);
757 #ifdef DHD_UNICAST_DHCP
758 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
759 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
760 int *len_ptr, uint8 *prot_ptr);
761 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
762 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
764 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
765 #endif /* DHD_UNICAST_DHCP */
767 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
769 #if defined(CONFIG_PM_SLEEP)
/* Kernel PM notifier: tracks system suspend/resume transitions so the
 * driver can park/resume wireless flow control (wlfc) and, on older
 * kernels, record the MMC bus suspend state.
 * NOTE(review): this is an extracted chunk with interior lines elided
 * (the switch header, break statements and final return are not visible).
 */
770 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
772 int ret = NOTIFY_DONE;	/* default: no interest in this PM event */
773 bool suspend = FALSE;
/* Recover the owning dhd_info from its embedded pm_notifier member. */
774 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
776 BCM_REFERENCE(dhdinfo);	/* silences unused warning when features below are compiled out */
/* Entering system suspend/hibernate ... */
778 case PM_HIBERNATION_PREPARE:
779 case PM_SUSPEND_PREPARE:
/* ... leaving system suspend/hibernate. */
782 case PM_POST_HIBERNATION:
783 case PM_POST_SUSPEND:
788 #if defined(SUPPORT_P2P_GO_PS)
/* Waive the wakelock while parking wlfc so the host is free to sleep. */
791 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
792 dhd_wlfc_suspend(&dhdinfo->pub);
793 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
795 dhd_wlfc_resume(&dhdinfo->pub);
797 #endif /* defined(SUPPORT_P2P_GO_PS) */
/* 2.6.27..2.6.39 kernels track the SDIO/MMC suspend state manually. */
799 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
800 KERNEL_VERSION(2, 6, 39))
801 dhd_mmc_suspend = suspend;
808 static struct notifier_block dhd_pm_notifier = {
809 .notifier_call = dhd_pm_callback,
812 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
813 * created in kernel notifier link list (with 'next' pointing to itself)
815 static bool dhd_pm_notifier_registered = FALSE;
817 extern int register_pm_notifier(struct notifier_block *nb);
818 extern int unregister_pm_notifier(struct notifier_block *nb);
819 #endif /* CONFIG_PM_SLEEP */
821 /* Request scheduling of the bus rx frame */
822 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
823 static void dhd_os_rxflock(dhd_pub_t *pub);
824 static void dhd_os_rxfunlock(dhd_pub_t *pub);
826 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
827 typedef struct dhd_dev_priv {
828 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
829 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
830 int ifidx; /* interface index */
833 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
834 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
835 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
836 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
837 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
839 /** Clear the dhd net_device's private structure. */
/* Resets the cached dhd/ifp back-pointers and marks the interface index
 * invalid so a stale netdev cannot be mistaken for a live interface. */
841 dhd_dev_priv_clear(struct net_device * dev)
843 dhd_dev_priv_t * dev_priv;
844 ASSERT(dev != (struct net_device *)NULL);
845 dev_priv = DHD_DEV_PRIV(dev);
846 dev_priv->dhd = (dhd_info_t *)NULL;
847 dev_priv->ifp = (dhd_if_t *)NULL;
848 dev_priv->ifidx = DHD_BAD_IF;	/* "no interface" sentinel */
851 /** Setup the dhd net_device's private structure. */
/* Caches the dhd_info/dhd_if back-pointers and interface index in the
 * netdev private area for fast lookup via the DHD_DEV_* macros.
 * NOTE(review): the dhd/ifp member assignments are not visible in this
 * extracted chunk (interior lines elided). */
853 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
856 dhd_dev_priv_t * dev_priv;
857 ASSERT(dev != (struct net_device *)NULL);
858 dev_priv = DHD_DEV_PRIV(dev);
861 dev_priv->ifidx = ifidx;
864 #ifdef PCIE_FULL_DONGLE
866 /** Dummy objects are defined with state representing bad|down.
867 * Performance gains from reducing branch conditionals, instruction parallelism,
868 * dual issue, reducing load shadows, avail of larger pipelines.
869 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
870 * is accessed via the dhd_sta_t.
873 /* Dummy dhd_info object */
874 dhd_info_t dhd_info_null = {
875 #if defined(BCM_GMAC3)
879 .info = &dhd_info_null,
880 #ifdef DHDTCPACK_SUPPRESS
881 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
882 #endif /* DHDTCPACK_SUPPRESS */
883 .up = FALSE, .busstate = DHD_BUS_DOWN
886 #define DHD_INFO_NULL (&dhd_info_null)
887 #define DHD_PUB_NULL (&dhd_info_null.pub)
889 /* Dummy netdevice object */
890 struct net_device dhd_net_dev_null = {
891 .reg_state = NETREG_UNREGISTERED
893 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
895 /* Dummy dhd_if object */
896 dhd_if_t dhd_if_null = {
897 #if defined(BCM_GMAC3)
901 .wmf = { .wmf_enable = TRUE },
903 .info = DHD_INFO_NULL,
904 .net = DHD_NET_DEV_NULL,
907 #define DHD_IF_NULL (&dhd_if_null)
909 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
911 /** Interface STA list management. */
913 /** Fetch the dhd_if object, given the interface index in the dhd. */
914 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
916 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
917 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
918 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
920 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
921 static void dhd_if_del_sta_list(dhd_if_t * ifp);
922 static void dhd_if_flush_sta(dhd_if_t * ifp);
924 /* Construct/Destruct a sta pool. */
925 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
926 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
927 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
930 /* Return interface pointer */
/* Map a dongle interface index to its dhd_if_t. The bound is ASSERTed in
 * debug builds and range-checked again for release builds. */
931 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
933 ASSERT(ifidx < DHD_MAX_IFS);
935 if (ifidx >= DHD_MAX_IFS)
/* NOTE(review): the out-of-range early-return statement between the check
 * and the lookup is not visible in this extracted chunk. */
938 return dhdp->info->iflist[ifidx];
941 /** Reset a dhd_sta object and free into the dhd pool. */
/* Returns the station id to the id16 allocator, invalidates all per-prio
 * flow ids, points the entry at the dummy interface object, wipes the MAC
 * and re-initializes the list node so the slot is safe to relink later. */
943 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
947 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
949 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
950 id16_map_free(dhdp->staid_allocator, sta->idx);
/* Invalidate every per-priority flow ring id owned by this station. */
951 for (prio = 0; prio < (int)NUMPRIO; prio++)
952 sta->flowid[prio] = FLOWID_INVALID;
953 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
954 sta->ifidx = DHD_BAD_IF;
955 bzero(sta->ea.octet, ETHER_ADDR_LEN);
956 INIT_LIST_HEAD(&sta->list);
957 sta->idx = ID16_INVALID; /* implying free */
960 /** Allocate a dhd_sta object from the dhd pool. */
/* Grabs a free station id from the id16 allocator and returns the matching
 * slot of the pre-allocated sta_pool; fails (DHD_STA_NULL path) when the
 * allocator is exhausted. */
962 dhd_sta_alloc(dhd_pub_t * dhdp)
966 dhd_sta_pool_t * sta_pool;
968 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
970 idx = id16_map_alloc(dhdp->staid_allocator);
971 if (idx == ID16_INVALID) {
972 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
/* The station id doubles as the index into the sta_pool array. */
976 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
977 sta = &sta_pool[idx];
/* Entry must still be in the pristine "free" state set by dhd_sta_free(). */
979 ASSERT((sta->idx == ID16_INVALID) &&
980 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
981 sta->idx = idx; /* implying allocated */
986 /** Delete all STAs in an interface's STA list. */
/* Uses the _safe iterator because each entry is unlinked and freed while
 * walking; the whole walk happens under the per-interface STA list lock. */
988 dhd_if_del_sta_list(dhd_if_t *ifp)
990 dhd_sta_t *sta, *next;
993 DHD_IF_STA_LIST_LOCK(ifp, flags);
995 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
996 #if defined(BCM_GMAC3)
998 /* Remove sta from WOFA forwarder. */
999 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1001 #endif /* BCM_GMAC3 */
1002 list_del(&sta->list);
1003 dhd_sta_free(&ifp->info->pub, sta);
1006 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1011 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
/* No-op unless built with BCM_GMAC3 and the interface has a forwarder
 * handle; entries are flushed from WOFA but stay linked on the sta_list. */
1013 dhd_if_flush_sta(dhd_if_t * ifp)
1015 #if defined(BCM_GMAC3)
1017 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1018 dhd_sta_t *sta, *next;
1019 unsigned long flags;
1021 DHD_IF_STA_LIST_LOCK(ifp, flags);
1023 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1024 /* Remove any sta entry from WOFA forwarder. */
1025 fwder_flush(ifp->fwdh, (wofa_t)sta);
1028 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1030 #endif /* BCM_GMAC3 */
1033 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
/* Builds the id16 station-id allocator (id #0 reserved) plus a zeroed
 * array of max_sta + 1 dhd_sta_t slots (slot 0 unused so a station id can
 * be used directly as the array index), then pushes every slot through
 * dhd_sta_free() to form the initial free pool. */
1035 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1037 int idx, sta_pool_memsz;
1039 dhd_sta_pool_t * sta_pool;
1040 void * staid_allocator;
1042 ASSERT(dhdp != (dhd_pub_t *)NULL);
1043 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1045 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1046 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1047 if (staid_allocator == NULL) {
1048 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1052 /* Pre allocate a pool of dhd_sta objects (one extra). */
1053 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1054 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1055 if (sta_pool == NULL) {
1056 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
/* Unwind the allocator so a failed init leaves no partial state behind. */
1057 id16_map_fini(dhdp->osh, staid_allocator);
1061 dhdp->sta_pool = sta_pool;
1062 dhdp->staid_allocator = staid_allocator;
1064 /* Initialize all sta(s) for the pre-allocated free pool. */
1065 bzero((uchar *)sta_pool, sta_pool_memsz);
1066 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1067 sta = &sta_pool[idx];
1068 sta->idx = id16_map_alloc(staid_allocator);
1069 ASSERT(sta->idx <= max_sta);
1071 /* Now place them into the pre-allocated free pool. */
1072 for (idx = 1; idx <= max_sta; idx++) {
1073 sta = &sta_pool[idx];
1074 dhd_sta_free(dhdp, sta);
1080 /** Destruct the pool of dhd_sta_t objects.
1081 * Caller must ensure that no STA objects are currently associated with an if.
1084 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1086 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
/* Debug builds verify every slot was returned to the free state first. */
1090 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1091 for (idx = 1; idx <= max_sta; idx++) {
1092 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1093 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1095 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1096 dhdp->sta_pool = NULL;
/* Tear down the id allocator after the pool memory is released. */
1099 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1100 dhdp->staid_allocator = NULL;
1103 /* Clear the pool of dhd_sta_t objects for built-in type driver */
/* Like dhd_sta_pool_init() but reuses the already-allocated pool and
 * allocator: zeroes the pool, resets the id16 map, re-allocates every id
 * and then frees each slot back into the free pool. Null checks guard
 * against a driver that was never (or only partially) initialized. */
1105 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1107 int idx, sta_pool_memsz;
1109 dhd_sta_pool_t * sta_pool;
1110 void *staid_allocator;
1113 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1117 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1118 staid_allocator = dhdp->staid_allocator;
1121 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1125 if (!staid_allocator) {
1126 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1130 /* clear free pool */
1131 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1132 bzero((uchar *)sta_pool, sta_pool_memsz);
1134 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1135 id16_map_clear(staid_allocator, max_sta, 1);
1137 /* Initialize all sta(s) for the pre-allocated free pool. */
1138 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1139 sta = &sta_pool[idx];
1140 sta->idx = id16_map_alloc(staid_allocator);
1141 ASSERT(sta->idx <= max_sta);
1143 /* Now place them into the pre-allocated free pool. */
1144 for (idx = 1; idx <= max_sta; idx++) {
1145 sta = &sta_pool[idx];
1146 dhd_sta_free(dhdp, sta);
1150 /** Find STA with MAC address ea in an interface's STA list. */
/* Linear scan of the interface's sta_list under the STA list lock; the
 * lock is dropped before returning on both the hit and miss paths. */
1152 dhd_find_sta(void *pub, int ifidx, void *ea)
1156 unsigned long flags;
1159 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1161 return DHD_STA_NULL;
1163 DHD_IF_STA_LIST_LOCK(ifp, flags);
1165 list_for_each_entry(sta, &ifp->sta_list, list) {
1166 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1167 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1172 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1174 return DHD_STA_NULL;
1177 /** Add STA into the interface's STA list. */
/*
 * Allocates a dhd_sta_t from the pool, copies the MAC address into it,
 * links it onto ifp->sta_list under the list lock, and (for GMAC3 builds)
 * registers it with the WOFA forwarder. Returns DHD_STA_NULL on bad ifidx
 * or allocation failure.
 */
1179 dhd_add_sta(void *pub, int ifidx, void *ea)
1183 unsigned long flags;
1186 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1188 return DHD_STA_NULL;
1190 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1191 if (sta == DHD_STA_NULL) {
1192 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1193 return DHD_STA_NULL;
1196 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1198 /* link the sta and the dhd interface */
1201 INIT_LIST_HEAD(&sta->list);
1203 DHD_IF_STA_LIST_LOCK(ifp, flags);
1205 list_add_tail(&sta->list, &ifp->sta_list);
1207 #if defined(BCM_GMAC3)
/* WOFA forwarder requires 2-byte aligned MAC pointers. */
1209 ASSERT(ISALIGNED(ea, 2));
1210 /* Add sta to WOFA forwarder. */
1211 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1213 #endif /* BCM_GMAC3 */
1215 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1220 /** Delete STA from the interface's STA list. */
/*
 * Safe-iterates the interface's STA list (entries may be deleted while
 * walking), unlinks any entry whose MAC matches ea, deregisters it from the
 * WOFA forwarder on GMAC3 builds, and returns it to the pool.
 */
1222 dhd_del_sta(void *pub, int ifidx, void *ea)
1224 dhd_sta_t *sta, *next;
1226 unsigned long flags;
1229 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1233 DHD_IF_STA_LIST_LOCK(ifp, flags);
/* list_for_each_entry_safe allows list_del() of the current node. */
1235 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1236 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1237 #if defined(BCM_GMAC3)
1238 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1239 ASSERT(ISALIGNED(ea, 2));
1240 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1242 #endif /* BCM_GMAC3 */
1243 list_del(&sta->list);
1244 dhd_sta_free(&ifp->info->pub, sta);
1248 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1253 /** Add STA if it doesn't exist. Not reentrant. */
/* Find-or-create: look the MAC up first; only allocate on a miss. */
1255 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1259 sta = dhd_find_sta(pub, ifidx, ea);
1263 sta = dhd_add_sta(pub, ifidx, ea);
/*
 * No-op STA management stubs for builds without PCIE_FULL_DONGLE: the full
 * STA pool / forwarder machinery above is only needed for the PCIe
 * full-dongle data path, so these keep the call sites link-clean elsewhere.
 */
1269 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
1270 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1271 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1272 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1273 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
1274 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1275 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1276 #endif /* PCIE_FULL_DONGLE */
1279 /* Returns dhd iflist index correspondig the the bssidx provided by apps */
/*
 * Maps a firmware bss index to the driver's iflist slot by scanning all
 * DHD_MAX_IFS entries for a matching ifp->bssidx.
 * NOTE(review): return statements are among the lines missing from this
 * listing; presumably i is returned on match — confirm against full source.
 */
1280 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1283 dhd_info_t *dhd = dhdp->info;
1286 ASSERT(bssidx < DHD_MAX_IFS);
1289 for (i = 0; i < DHD_MAX_IFS; i++) {
1290 ifp = dhd->iflist[i];
1291 if (ifp && (ifp->bssidx == bssidx)) {
1292 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1293 ifp->name, bssidx, i));
/*
 * Pushes an rx skb into the fixed-size circular buffer consumed by the rx
 * frame thread. skbbuf has MAXSKBPEND slots; store_idx/sent_idx wrap with
 * a power-of-two mask. If the target slot is still occupied the producer
 * backs off (behavior differs under RXF_DEQUEUE_ON_BUSY).
 */
1300 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1306 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1310 dhd_os_rxflock(dhdp);
1311 store_idx = dhdp->store_idx;
1312 sent_idx = dhdp->sent_idx;
/* Slot still holds an unconsumed packet: the ring is full. */
1313 if (dhdp->skbbuf[store_idx] != NULL) {
1314 /* Make sure the previous packets are processed */
1315 dhd_os_rxfunlock(dhdp);
1316 #ifdef RXF_DEQUEUE_ON_BUSY
1317 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1318 skb, store_idx, sent_idx));
1320 #else /* RXF_DEQUEUE_ON_BUSY */
1321 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1322 skb, store_idx, sent_idx));
1323 /* removed msleep here, should use wait_event_timeout if we
1324 * want to give rx frame thread a chance to run
1326 #if defined(WAIT_DEQUEUE)
1330 #endif /* RXF_DEQUEUE_ON_BUSY */
1332 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1333 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
/* Publish the skb, then advance the store index modulo MAXSKBPEND. */
1334 dhdp->skbbuf[store_idx] = skb;
1335 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1336 dhd_os_rxfunlock(dhdp);
/*
 * Consumer side of the rx skb ring: under the rxf lock, takes the skb at
 * sent_idx, clears the slot, and advances sent_idx (wrapping with the
 * MAXSKBPEND-1 mask). Returns NULL (with an error log) when the ring is
 * empty.
 */
1341 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1347 dhd_os_rxflock(dhdp);
1349 store_idx = dhdp->store_idx;
1350 sent_idx = dhdp->sent_idx;
1351 skb = dhdp->skbbuf[sent_idx];
/* Empty slot means nothing was enqueued — unlock and report. */
1354 dhd_os_rxfunlock(dhdp);
1355 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1356 store_idx, sent_idx));
1360 dhdp->skbbuf[sent_idx] = NULL;
1361 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1363 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1366 dhd_os_rxfunlock(dhdp);
/*
 * Pre/post-firmware-download MAC address handling: prepost=TRUE reads the
 * platform MAC before download, prepost=FALSE writes it back afterwards.
 * NOTE(review): 'dhd' is declared only when CUSTOMER_HW10 is NOT defined
 * yet is used unconditionally below — the CUSTOMER_HW10 variant of the
 * declaration is presumably among the lines missing from this listing;
 * confirm against the full source.
 */
1371 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1373 #ifndef CUSTOMER_HW10
1374 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1375 #endif /* !CUSTOMER_HW10 */
1377 if (prepost) { /* pre process */
1378 dhd_read_macaddr(dhd);
1379 } else { /* post process */
1380 dhd_write_macaddr(&dhd->pub.mac);
1386 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
/*
 * Decides whether the ARP packet filter should be applied for the current
 * operating mode: TRUE for IBSS, and for P2P GC/GO when the dongle reports
 * arp_version 1. Returns the _apply flag (return line not visible here).
 */
1388 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1390 bool _apply = FALSE;
1391 /* In case of IBSS mode, apply arp pkt filter */
1392 if (op_mode & DHD_FLAG_IBSS_MODE) {
1396 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1397 if ((dhd->arp_version == 1) &&
1398 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1406 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1408 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1409 #ifdef PKT_FILTER_SUPPORT
/*
 * Parses the numeric mode out of the command string and stores it in
 * pub.pkt_filter_mode (Tegra-specific packet-filter control).
 */
1411 dhd_set_packet_filter_mode(struct net_device *dev, char *command)
1413 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1415 dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
/*
 * Parses a "<action> <port> <port> ..." command and edits the driver's
 * port-filter table (dhdp->pkt_filter_ports). Actions: LOOPBACK echoes a
 * value if the fw supports "pktfltr2", CLEAR wipes the table, ADD appends
 * unique ports, DEL removes listed ports. Updates pkt_filter_ports_count.
 */
1419 dhd_set_packet_filter_ports(struct net_device *dev, char *command)
1421 int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
1422 uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
1423 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1424 dhd_pub_t *dhdp = &dhdi->pub;
1425 char iovbuf[WLC_IOCTL_SMLEN];
/* First token of the command is the action code. */
1428 action = bcm_strtoul(command, &command, 0);
1429 if (action > PKT_FILTER_PORTS_MAX)
1432 if (action == PKT_FILTER_PORTS_LOOPBACK) {
1433 /* echo the loopback value if port filter is supported else error */
1434 bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
1435 error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
1437 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
/* Firmware capability string must advertise "pktfltr2". */
1441 if (strstr(iovbuf, "pktfltr2"))
1442 return bcm_strtoul(command, &command, 0);
1444 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
1445 return BCME_UNSUPPORTED;
1449 if (action == PKT_FILTER_PORTS_CLEAR) {
1450 /* action 0 is clear all ports */
1451 dhdp->pkt_filter_ports_count = 0;
1452 bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
1455 portnum = bcm_strtoul(command, &command, 0);
1457 /* no ports to add or remove */
1461 /* get configured ports */
1462 count = dhdp->pkt_filter_ports_count;
1463 ports = dhdp->pkt_filter_ports;
1465 if (action == PKT_FILTER_PORTS_ADD) {
1466 /* action 1 is add ports */
1468 /* copy new ports */
/* Append each parsed port unless it already exists or the table is full. */
1469 while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
1470 for (i = 0; i < count; i++) {
1471 /* duplicate port */
1472 if (portnum == ports[i])
1475 if (portnum != ports[i])
1476 ports[count++] = portnum;
1477 portnum = bcm_strtoul(command, &command, 0);
1479 } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
1480 /* action 2 is remove ports */
/* Work on a snapshot (get_ports), rebuild ports without each deleted one. */
1481 bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
1484 while (portnum != 0) {
1486 for (i = 0; i < get_count; i++) {
1487 if (portnum != get_ports[i])
1488 ports[count++] = get_ports[i];
1491 bcopy(ports, get_ports, count * sizeof(uint16));
1492 portnum = bcm_strtoul(command, &command, 0);
1495 dhdp->pkt_filter_ports_count = count;
/*
 * Pushes the host-side port-filter table to the firmware: builds a
 * wl_pkt_filter_ports_t list in a stack buffer, sets "pkt_filter_ports"
 * and then "pkt_filter_mode" iovars. When enabling, it also folds the
 * PORTS_ONLY / FORWARD_ON_MATCH bits into the global dhd_master_mode
 * (whitelist vs blacklist); when disabling, it clears them.
 */
1501 dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
1504 wl_pkt_filter_ports_t *portlist = NULL;
/* Buffer sized for the iovar name + fixed header + max port array. */
1505 const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
1506 + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
1507 char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
1508 char iovbuf[pkt_filter_ports_buf_len];
1510 DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
1511 enable, dhd->in_suspend, dhd->pkt_filter_mode,
1512 dhd->pkt_filter_ports_count));
1514 bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
1515 portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
1516 portlist->version = WL_PKT_FILTER_PORTS_VERSION;
1517 portlist->reserved = 0;
1520 if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
1523 /* enable port filter */
1524 dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
1525 if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
1526 /* whitelist mode: FORWARD_ON_MATCH */
1527 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1529 /* blacklist mode: DISCARD_ON_MATCH */
1530 dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;
1532 portlist->count = dhd->pkt_filter_ports_count;
1533 bcopy(dhd->pkt_filter_ports, portlist->ports,
1534 dhd->pkt_filter_ports_count * sizeof(uint16));
1536 /* disable port filter */
1537 portlist->count = 0;
1538 dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
1539 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1542 DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
/* Send the port list, then the mode, as two separate SET_VAR ioctls. */
1546 bcm_mkiovar("pkt_filter_ports",
1548 (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
1549 iovbuf, sizeof(iovbuf));
1550 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1552 DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));
1555 bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
1556 sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
1557 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1559 DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));
1563 #endif /* PKT_FILTER_SUPPORT */
1564 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
/*
 * Downloads every configured packet-filter string in dhd->pktfilter[] to
 * the dongle. No-op unless PKT_FILTER_SUPPORT is compiled in and the
 * global dhd_pkt_filter_enable flag is set.
 */
1566 void dhd_set_packet_filter(dhd_pub_t *dhd)
1568 #ifdef PKT_FILTER_SUPPORT
1571 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1572 if (dhd_pkt_filter_enable) {
1573 for (i = 0; i < dhd->pktfilter_count; i++) {
1574 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1577 #endif /* PKT_FILTER_SUPPORT */
/*
 * Enables (value=1) or disables (value=0) all offloaded packet filters.
 * Enabling is gated on STA mode with no DHCP in progress; the ARP filter
 * entry is additionally skipped unless _turn_on_arp_filter() approves it
 * for the current op_mode. Tegra builds also toggle the port filter.
 */
1580 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1582 #ifdef PKT_FILTER_SUPPORT
1585 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
1587 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1588 dhd_enable_packet_filter_ports(dhd, value);
1589 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1591 /* 1 - Enable packet filter, only allow unicast packet to send up */
1592 /* 0 - Disable packet filter */
1593 if (dhd_pkt_filter_enable && (!value ||
1594 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
1596 for (i = 0; i < dhd->pktfilter_count; i++) {
1597 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
/* Skip the ARP whitelist filter unless the op mode calls for it. */
1598 if (value && (i == DHD_ARP_FILTER_NUM) &&
1599 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1600 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1601 "val %d, cnt %d, op_mode 0x%x\n",
1602 value, i, dhd->op_mode));
1605 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1606 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1607 value, dhd_master_mode);
1610 #endif /* PKT_FILTER_SUPPORT */
/*
 * Applies (value=1) or reverts (value=0) the driver's suspend power policy
 * while the host is suspended: PM mode, packet filters, beacon DTIM skip
 * (bcn_li_dtim), firmware roaming (roam_off), and the IPv6 RA filter.
 * All firmware settings go down as bcm_mkiovar-built SET_VAR/SET_PM ioctls
 * under the suspend lock.
 */
1613 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1615 #ifndef SUPPORT_PM2_ONLY
1616 int power_mode = PM_MAX;
1617 #endif /* SUPPORT_PM2_ONLY */
1618 /* wl_pkt_filter_enable_t enable_parm; */
1620 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1621 uint roamvar = dhd->conf->roam_off_suspend;
1622 uint nd_ra_filter = 0;
1628 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1629 __FUNCTION__, value, dhd->in_suspend));
1631 dhd_suspend_lock(dhd);
1633 #ifdef CUSTOM_SET_CPUCORE
1634 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1635 /* set specific cpucore */
1636 dhd_set_cpucore(dhd, TRUE);
1637 #endif /* CUSTOM_SET_CPUCORE */
1638 #ifndef SUPPORT_PM2_ONLY
/* A non-negative conf->pm overrides the default PM_MAX for suspend. */
1639 if (dhd->conf->pm >= 0)
1640 power_mode = dhd->conf->pm;
1641 #endif /* SUPPORT_PM2_ONLY */
/* ---- suspend path: host is going down and suspend was requested ---- */
1643 if (value && dhd->in_suspend) {
1644 #ifdef PKT_FILTER_SUPPORT
1645 dhd->early_suspended = 1;
1647 /* Kernel suspended */
1648 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
1650 #ifndef SUPPORT_PM2_ONLY
1651 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1652 sizeof(power_mode), TRUE, 0);
1653 #endif /* SUPPORT_PM2_ONLY */
1655 /* Enable packet filter, only allow unicast packet to send up */
1656 dhd_enable_packet_filter(1, dhd);
1658 /* If DTIM skip is set up as default, force it to wake
1659 * each third DTIM for better power savings. Note that
1660 * one side effect is a chance to miss BC/MC packet.
1662 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
1663 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1664 4, iovbuf, sizeof(iovbuf));
1665 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
1667 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
1669 /* Disable firmware roaming during suspend */
1670 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1671 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
/* RA filter only when the firmware advertises ND offload (ndoe). */
1672 if (FW_SUPPORTED(dhd, ndoe)) {
1673 /* enable IPv6 RA filter in firmware during suspend */
1675 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1676 iovbuf, sizeof(iovbuf));
1677 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1678 sizeof(iovbuf), TRUE, 0)) < 0)
1679 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
/* ---- resume path: undo every suspend-time setting above ---- */
1683 #ifdef PKT_FILTER_SUPPORT
1684 dhd->early_suspended = 0;
1686 /* Kernel resumed */
1687 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
1689 #ifndef SUPPORT_PM2_ONLY
1690 power_mode = PM_FAST;
1691 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1692 sizeof(power_mode), TRUE, 0);
1693 #endif /* SUPPORT_PM2_ONLY */
1694 #ifdef PKT_FILTER_SUPPORT
1695 /* disable pkt filter */
1696 dhd_enable_packet_filter(0, dhd);
1697 #endif /* PKT_FILTER_SUPPORT */
1699 /* restore pre-suspend setting for dtim_skip */
1700 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1701 4, iovbuf, sizeof(iovbuf));
1703 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1704 roamvar = dhd_roam_disable;
1705 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1706 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1707 if (FW_SUPPORTED(dhd, ndoe)) {
1708 /* disable IPv6 RA filter in firmware during suspend */
1710 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1711 iovbuf, sizeof(iovbuf));
1712 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1713 sizeof(iovbuf), TRUE, 0)) < 0)
1714 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1719 dhd_suspend_unlock(dhd);
/*
 * Common wrapper for early-suspend/late-resume: records in_suspend=val and,
 * unless suspend handling is disabled (or 'force' overrides it) and only in
 * STA mode, invokes dhd_set_suspend(). Runs under wake lock + perimeter
 * lock.
 */
1724 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1726 dhd_pub_t *dhdp = &dhd->pub;
1729 DHD_OS_WAKE_LOCK(dhdp);
1730 DHD_PERIM_LOCK(dhdp);
1732 /* Set flag when early suspend was called */
1733 dhdp->in_suspend = val;
1734 if ((force || !dhdp->suspend_disable_flag) &&
1735 dhd_support_sta_mode(dhdp))
1737 ret = dhd_set_suspend(val, dhdp);
1740 DHD_PERIM_UNLOCK(dhdp);
1741 DHD_OS_WAKE_UNLOCK(dhdp);
1745 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
/* Android early-suspend callback: enter suspend policy (val=1, no force). */
1746 static void dhd_early_suspend(struct early_suspend *h)
1748 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1749 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1752 dhd_suspend_resume_helper(dhd, 1, 0);
/* Android late-resume callback: revert suspend policy (val=0, no force). */
1755 static void dhd_late_resume(struct early_suspend *h)
1757 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1758 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1761 dhd_suspend_resume_helper(dhd, 0, 0);
1763 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1766 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1767 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1769 * dhd_timeout_start(&tmo, usec);
1770 * while (!dhd_timeout_expired(&tmo))
1771 * if (poll_something())
1773 * if (dhd_timeout_expired(&tmo))
/* Arm the timeout: record the usec budget and cache one jiffy in usecs. */
1778 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1783 tmo->tick = jiffies_to_usecs(1);
/*
 * Polled by the caller's loop; returns whether the budget set by
 * dhd_timeout_start() is used up. Between polls it delays: busy-spin with
 * exponential back-off while the increment is below one jiffy (or sleeping
 * is not allowed), then an interruptible one-jiffy schedule_timeout().
 */
1787 dhd_timeout_expired(dhd_timeout_t *tmo)
1789 /* Does nothing the first call */
1790 if (tmo->increment == 0) {
1795 if (tmo->elapsed >= tmo->limit)
1798 /* Add the delay that's about to take place */
1799 tmo->elapsed += tmo->increment;
1801 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
1802 OSL_DELAY(tmo->increment);
/* Exponential back-off, capped at one jiffy's worth of usecs. */
1803 tmo->increment *= 2;
1804 if (tmo->increment > tmo->tick)
1805 tmo->increment = tmo->tick;
/* Sleepable path: park on a private waitqueue for one jiffy. */
1807 wait_queue_head_t delay_wait;
1808 DECLARE_WAITQUEUE(wait, current);
1809 init_waitqueue_head(&delay_wait);
1810 add_wait_queue(&delay_wait, &wait);
1811 set_current_state(TASK_INTERRUPTIBLE);
1812 (void)schedule_timeout(1);
1813 remove_wait_queue(&delay_wait, &wait);
1814 set_current_state(TASK_RUNNING);
/*
 * Maps a net_device back to its iflist index by linear search; logs and
 * (presumably) returns DHD_BAD_IF when dhd is invalid or no entry matches.
 */
1821 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1826 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
1829 while (i < DHD_MAX_IFS) {
1830 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
/*
 * Inverse of dhd_net2idx: returns the net_device for a valid interface
 * index, or falls through (return NULL not visible here) on a bad handle,
 * out-of-range index, or empty slot.
 */
1838 struct net_device * dhd_idx2net(void *pub, int ifidx)
1840 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1841 struct dhd_info *dhd_info;
1843 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1845 dhd_info = dhd_pub->info;
1846 if (dhd_info && dhd_info->iflist[ifidx])
1847 return dhd_info->iflist[ifidx]->net;
/*
 * Finds the iflist index whose name matches (IFNAMSIZ-bounded compare).
 * Scans downward from DHD_MAX_IFS; index 0 — the primary interface — is
 * the fallback for a NULL/empty name or no match.
 */
1852 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1854 int i = DHD_MAX_IFS;
1858 if (name == NULL || *name == '\0')
1862 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1865 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1867 return i; /* default - the primary interface */
/*
 * Translates a firmware interface index (ifp->idx) to the host-side iflist
 * slot, scanning downward; defaults to 0 (primary interface) when no match.
 */
1871 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1873 int i = DHD_MAX_IFS;
1878 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1881 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1883 return i; /* default - the primary interface */
/*
 * Returns the kernel net_device name for an interface index, with range
 * and NULL-slot validation (the error-path return strings are among the
 * lines not visible in this listing).
 */
1887 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1889 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1893 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1894 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1898 if (dhd->iflist[ifidx] == NULL) {
1899 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1903 if (dhd->iflist[ifidx]->net)
1904 return dhd->iflist[ifidx]->net->name;
/*
 * Returns the MAC address of the interface whose bssidx matches idx.
 * NOTE(review): unlike dhd_ifname() above, this casts dhdp (a dhd_pub_t*)
 * directly to dhd_info_t* instead of using dhdp->info — looks like a bug;
 * verify against the full source before relying on this function.
 */
1910 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1913 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1916 for (i = 0; i < DHD_MAX_IFS; i++)
1917 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1918 return dhd->iflist[i]->mac_addr;
/*
 * Syncs the kernel net_device multicast configuration down to the dongle in
 * three steps: (1) "mcast_list" iovar with the snapshot of the MC address
 * list, (2) "allmulti" iovar (forced on if the list download failed with a
 * non-empty list), (3) WLC_SET_PROMISC from IFF_PROMISC. MC list access is
 * bracketed by netif_addr_lock_bh on >= 2.6.27; the 2.6.35 split selects
 * netdev_mc_count/netdev_for_each_mc_addr vs the legacy dev->mc_list walk.
 */
1925 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
1927 struct net_device *dev;
1928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1929 struct netdev_hw_addr *ha;
1931 struct dev_mc_list *mclist;
1933 uint32 allmulti, cnt;
1940 ASSERT(dhd && dhd->iflist[ifidx]);
1941 dev = dhd->iflist[ifidx]->net;
/* Count the MC addresses under the address lock. */
1944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1945 netif_addr_lock_bh(dev);
1947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1948 cnt = netdev_mc_count(dev);
1950 cnt = dev->mc_count;
1951 #endif /* LINUX_VERSION_CODE */
1953 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1954 netif_addr_unlock_bh(dev);
1957 /* Determine initial value of allmulti flag */
1958 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
1960 /* Send down the multicast list first. */
/* Buffer layout: "mcast_list\0" + uint32 count + cnt * 6-byte addresses. */
1963 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
1964 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
1965 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
1966 dhd_ifname(&dhd->pub, ifidx), cnt));
1970 strncpy(bufp, "mcast_list", buflen - 1);
1971 bufp[buflen - 1] = '\0';
1972 bufp += strlen("mcast_list") + 1;
1975 memcpy(bufp, &cnt, sizeof(cnt));
1976 bufp += sizeof(cnt);
/* Re-take the lock for the actual address copy; list may have changed
 * since counting — cnt bounds the copy either way.
 */
1979 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1980 netif_addr_lock_bh(dev);
1982 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1983 netdev_for_each_mc_addr(ha, dev) {
1986 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
1987 bufp += ETHER_ADDR_LEN;
1991 for (mclist = dev->mc_list; (mclist && (cnt > 0));
1992 cnt--, mclist = mclist->next) {
1993 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
1994 bufp += ETHER_ADDR_LEN;
1996 #endif /* LINUX_VERSION_CODE */
1998 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1999 netif_addr_unlock_bh(dev);
2002 memset(&ioc, 0, sizeof(ioc));
2003 ioc.cmd = WLC_SET_VAR;
2008 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2010 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2011 dhd_ifname(&dhd->pub, ifidx), cnt));
/* List download failed: fall back to allmulti if there were addresses. */
2012 allmulti = cnt ? TRUE : allmulti;
2015 MFREE(dhd->pub.osh, buf, buflen);
2017 /* Now send the allmulti setting. This is based on the setting in the
2018 * net_device flags, but might be modified above to be turned on if we
2019 * were trying to set some addresses and dongle rejected it...
2022 buflen = sizeof("allmulti") + sizeof(allmulti);
2023 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
2024 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
2027 allmulti = htol32(allmulti);
2029 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
2030 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
2031 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
2032 MFREE(dhd->pub.osh, buf, buflen);
2037 memset(&ioc, 0, sizeof(ioc));
2038 ioc.cmd = WLC_SET_VAR;
2043 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2045 DHD_ERROR(("%s: set allmulti %d failed\n",
2046 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2049 MFREE(dhd->pub.osh, buf, buflen);
2051 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
/* 'allmulti' variable is reused here to carry the promisc value. */
2053 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2055 allmulti = htol32(allmulti);
2057 memset(&ioc, 0, sizeof(ioc));
2058 ioc.cmd = WLC_SET_PROMISC;
2059 ioc.buf = &allmulti;
2060 ioc.len = sizeof(allmulti);
2063 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2065 DHD_ERROR(("%s: set promisc %d failed\n",
2066 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
/*
 * Pushes a new MAC to the dongle via the "cur_etheraddr" iovar and, on
 * success, mirrors it into the net_device and dhd->pub.mac.
 */
2071 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2077 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
2078 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
2081 memset(&ioc, 0, sizeof(ioc));
2082 ioc.cmd = WLC_SET_VAR;
2087 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2089 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
/* Success path: keep kernel and driver copies of the MAC in sync. */
2091 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2093 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2100 extern struct net_device *ap_net_dev;
2101 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
/*
 * Deferred-work handler for a firmware IF_ADD event: allocates the new
 * virtual net_device, wires up a cfg80211 wireless_dev on kernels >= 3.11
 * (sharing the primary interface's wiphy), registers the netdev, and for
 * PCIe full-dongle AP-role interfaces enables firmware ap_isolate.
 * Runs under net_if lock + wake lock + perimeter lock; frees the
 * dhd_if_event_t it was handed before returning.
 */
2105 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2107 dhd_info_t *dhd = handle;
2108 dhd_if_event_t *if_event = event_info;
2109 struct net_device *ndev;
2112 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2113 struct wireless_dev *vwdev, *primary_wdev;
2114 struct net_device *primary_ndev;
2115 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
/* Sanity checks: right work item, valid handle, valid payload. */
2117 if (event != DHD_WQ_WORK_IF_ADD) {
2118 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2123 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2128 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2132 dhd_net_if_lock_local(dhd);
2133 DHD_OS_WAKE_LOCK(&dhd->pub);
2134 DHD_PERIM_LOCK(&dhd->pub);
2136 ifidx = if_event->event.ifidx;
2137 bssidx = if_event->event.bssidx;
2138 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2140 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2141 if_event->mac, bssidx, TRUE);
2143 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2147 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2148 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
2149 if (unlikely(!vwdev)) {
2150 WL_ERR(("Could not allocate wireless device\n"));
/* New vdev shares the primary interface's wiphy. */
2153 primary_ndev = dhd->pub.info->iflist[0]->net;
2154 primary_wdev = ndev_to_wdev(primary_ndev);
2155 vwdev->wiphy = primary_wdev->wiphy;
2156 vwdev->iftype = if_event->event.role;
2157 vwdev->netdev = ndev;
2158 ndev->ieee80211_ptr = vwdev;
2159 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
2160 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
2161 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
/* register_netdev can sleep — drop the perimeter lock around it. */
2163 DHD_PERIM_UNLOCK(&dhd->pub);
2164 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2165 DHD_PERIM_LOCK(&dhd->pub);
2166 if (ret != BCME_OK) {
2167 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2168 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2171 #ifdef PCIE_FULL_DONGLE
2172 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2173 if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
2174 char iovbuf[WLC_IOCTL_SMLEN];
2177 memset(iovbuf, 0, sizeof(iovbuf));
2178 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
2179 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
2181 if (ret != BCME_OK) {
2182 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2183 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2186 #endif /* PCIE_FULL_DONGLE */
/* Work item owns the event struct — release it here. */
2188 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2190 DHD_PERIM_UNLOCK(&dhd->pub);
2191 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2192 dhd_net_if_unlock_local(dhd);
/*
 * Deferred-work handler for a firmware IF_DEL event: validates the work
 * item, tears down the interface via dhd_remove_if() under the usual
 * net_if/wake/perimeter locks, and frees the event payload.
 */
2196 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2198 dhd_info_t *dhd = handle;
2200 dhd_if_event_t *if_event = event_info;
2203 if (event != DHD_WQ_WORK_IF_DEL) {
2204 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2209 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2214 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2218 dhd_net_if_lock_local(dhd);
2219 DHD_OS_WAKE_LOCK(&dhd->pub);
2220 DHD_PERIM_LOCK(&dhd->pub);
2222 ifidx = if_event->event.ifidx;
2223 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2225 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2227 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2229 DHD_PERIM_UNLOCK(&dhd->pub);
2230 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2231 dhd_net_if_unlock_local(dhd);
/*
 * Deferred-work handler that applies a MAC address change queued by
 * dhd_set_mac_address(): blocked while an AP netdev exists (soft-AP mode),
 * otherwise clears ifp->set_macaddress and calls _dhd_set_mac_address().
 * NOTE(review): the DHD_ERROR "MACID is overwritten" appears before the
 * set call as well as in the success DHD_INFO — ordering looks odd but
 * intervening lines are missing from this listing; confirm in full source.
 */
2235 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2237 dhd_info_t *dhd = handle;
2238 dhd_if_t *ifp = event_info;
2240 if (event != DHD_WQ_WORK_SET_MAC) {
2241 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2245 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2249 dhd_net_if_lock_local(dhd);
2250 DHD_OS_WAKE_LOCK(&dhd->pub);
2251 DHD_PERIM_LOCK(&dhd->pub);
2255 unsigned long flags;
/* Snapshot ap_net_dev under the general lock to detect AP mode. */
2257 DHD_GENERAL_LOCK(&dhd->pub, flags);
2258 in_ap = (ap_net_dev != NULL);
2259 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2262 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2269 if (ifp == NULL || !dhd->pub.up) {
2270 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2274 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
2275 ifp->set_macaddress = FALSE;
2276 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2277 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2279 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2282 DHD_PERIM_UNLOCK(&dhd->pub);
2283 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2284 dhd_net_if_unlock_local(dhd);
/*
 * Deferred-work handler that applies a queued multicast-list update:
 * blocked while an AP netdev exists, skipped if the interface is gone or
 * the driver is down, otherwise calls _dhd_set_multicast_list().
 */
2288 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
2290 dhd_info_t *dhd = handle;
2291 dhd_if_t *ifp = event_info;
2294 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
2295 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2300 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2304 dhd_net_if_lock_local(dhd);
2305 DHD_OS_WAKE_LOCK(&dhd->pub);
2306 DHD_PERIM_LOCK(&dhd->pub);
2311 unsigned long flags;
2312 DHD_GENERAL_LOCK(&dhd->pub, flags);
2313 in_ap = (ap_net_dev != NULL);
2314 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2317 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
/* Drop the pending flag so the request is not retried in AP mode. */
2319 ifp->set_multicast = FALSE;
2325 if (ifp == NULL || !dhd->pub.up) {
2326 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2333 _dhd_set_multicast_list(dhd, ifidx);
2334 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
2337 DHD_PERIM_UNLOCK(&dhd->pub);
2338 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2339 dhd_net_if_unlock_local(dhd);
/*
 * net_device_ops .ndo_set_mac_address entry point: stores the requested
 * MAC on the dhd_if under the net_if lock, marks set_macaddress, and
 * defers the actual firmware update to dhd_set_mac_addr_handler() via the
 * deferred work queue (ioctls cannot run in this context).
 */
2343 dhd_set_mac_address(struct net_device *dev, void *addr)
2347 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2348 struct sockaddr *sa = (struct sockaddr *)addr;
2352 ifidx = dhd_net2idx(dhd, dev);
2353 if (ifidx == DHD_BAD_IF)
2356 dhdif = dhd->iflist[ifidx];
2358 dhd_net_if_lock_local(dhd);
2359 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2360 dhdif->set_macaddress = TRUE;
2361 dhd_net_if_unlock_local(dhd);
2362 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2363 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
/*
 * net_device_ops .ndo_set_rx_mode entry point: flags the interface and
 * defers the multicast-list download to dhd_set_mcast_list_handler()
 * (runs in atomic context, so no ioctl here).
 */
2368 dhd_set_multicast_list(struct net_device *dev)
2370 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2373 ifidx = dhd_net2idx(dhd, dev);
2374 if (ifidx == DHD_BAD_IF)
2377 dhd->iflist[ifidx]->set_multicast = TRUE;
2378 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2379 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2382 #ifdef PROP_TXSTATUS
/* Acquire the proptxstatus (wlfc) bottom-half spinlock. */
2384 dhd_os_wlfc_block(dhd_pub_t *pub)
2386 dhd_info_t *di = (dhd_info_t *)(pub->info);
2388 spin_lock_bh(&di->wlfc_spinlock);
/* Release the proptxstatus (wlfc) bottom-half spinlock. */
2393 dhd_os_wlfc_unblock(dhd_pub_t *pub)
2395 dhd_info_t *di = (dhd_info_t *)(pub->info);
2398 spin_unlock_bh(&di->wlfc_spinlock);
2402 #endif /* PROP_TXSTATUS */
2404 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Ethertype -> printable name table used by the TX/RX dump helpers. */
2410 static const PKTTYPE_INFO packet_type_info[] =
2412 { ETHER_TYPE_IP, "IP" },
2413 { ETHER_TYPE_ARP, "ARP" },
2414 { ETHER_TYPE_BRCM, "BRCM" },
2415 { ETHER_TYPE_802_1X, "802.1X" },
2416 { ETHER_TYPE_WAI, "WAPI" },
/*
 * Looks up the name for an ethertype; searches only the first n entries so
 * the table's last entry (not visible in this listing) serves as the
 * catch-all returned on a miss. sizeof(...[1]) equals the element size, so
 * n is (entry count - 1).
 */
2420 static const char *_get_packet_type_str(uint16 type)
2423 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2425 for (i = 0; i < n; i++) {
2426 if (packet_type_info[i].type == type)
2427 return packet_type_info[i].str;
2430 return packet_type_info[n].str;
2432 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
2434 #if defined(DHD_TX_DUMP)
/*
 * Debug dump of an outgoing packet: logs the ethertype name; for EAPOL
 * (802.1X) frames also logs version/type/replay-counter bytes, and with
 * DHD_TX_FULL_DUMP prints every payload byte in hex.
 */
2436 dhd_tx_dump(osl_t *osh, void *pkt)
2440 struct ether_header *eh;
2442 dump_data = PKTDATA(osh, pkt);
2443 eh = (struct ether_header *) dump_data;
2444 protocol = ntoh16(eh->ether_type);
2446 DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));
2448 if (protocol == ETHER_TYPE_802_1X) {
/* Byte offsets 14/15/30: EAPOL version, type, and replay-counter byte. */
2449 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2450 dump_data[14], dump_data[15], dump_data[30]));
2453 #if defined(DHD_TX_FULL_DUMP)
2457 datalen = PKTLEN(osh, pkt);
2459 for (i = 0; i < datalen; i++) {
2460 DHD_ERROR(("%02X ", dump_data[i]));
2466 #endif /* DHD_TX_FULL_DUMP */
2468 #endif /* DHD_TX_DUMP */
/*
 * Main TX entry into the bus layer. Order of operations: reject when the
 * driver/bus is down (freeing the packet — caller does not), optionally
 * rewrite broadcast DHCP ACKs to unicast, update multicast/802.1X stats,
 * assign packet priority, map to a PCIe flowring when applicable, tag for
 * proptxstatus, push the protocol header, then hand off to
 * dhd_bus_txdata() either directly or through wlfc commit.
 */
2471 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
2474 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
/* NOTE(review): eh is only assigned inside the PKTLEN >= ETHER_HDR_LEN
 * branch below, yet the PROP_TXSTATUS path dereferences it — a runt
 * packet reaching that path would deref NULL. Lines are missing from
 * this listing; verify the guard in the full source.
 */
2475 struct ether_header *eh = NULL;
2477 /* Reject if down */
2478 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
2479 /* free the packet here since the caller won't */
2480 PKTFREE(dhdp->osh, pktbuf, TRUE);
2484 #ifdef PCIE_FULL_DONGLE
2485 if (dhdp->busstate == DHD_BUS_SUSPEND) {
2486 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
2487 PKTFREE(dhdp->osh, pktbuf, TRUE);
2490 #endif /* PCIE_FULL_DONGLE */
2492 #ifdef DHD_UNICAST_DHCP
2493 /* if dhcp_unicast is enabled, we need to convert the */
2494 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2495 if (dhdp->dhcp_unicast) {
2496 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
2498 #endif /* DHD_UNICAST_DHCP */
2499 /* Update multicast statistic */
2500 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
2501 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
2502 eh = (struct ether_header *)pktdata;
2504 if (ETHER_ISMULTI(eh->ether_dhost))
2505 dhdp->tx_multicast++;
/* Track in-flight EAPOL frames for the 802.1X completion wait. */
2506 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
2507 atomic_inc(&dhd->pend_8021x_cnt);
2509 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2513 /* Look into the packet and update the packet priority */
2514 #ifndef PKTPRIO_OVERRIDE
2515 if (PKTPRIO(pktbuf) == 0)
2517 pktsetprio(pktbuf, FALSE);
2520 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
2522 * Lkup the per interface hash table, for a matching flowring. If one is not
2523 * available, allocate a unique flowid and add a flowring entry.
2524 * The found or newly created flowid is placed into the pktbuf's tag.
2526 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
2527 if (ret != BCME_OK) {
2528 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
2532 #if defined(DHD_TX_DUMP)
2533 dhd_tx_dump(dhdp->osh, pktbuf);
2536 #ifdef PROP_TXSTATUS
2537 if (dhd_wlfc_is_supported(dhdp)) {
2538 /* store the interface ID */
2539 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
2541 /* store destination MAC in the tag as well */
2542 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
2544 /* decide which FIFO this packet belongs to */
2545 if (ETHER_ISMULTI(eh->ether_dhost))
2546 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2547 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
2549 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
2551 #endif /* PROP_TXSTATUS */
2552 /* If the protocol uses a data header, apply it */
2553 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
2555 /* Use bus module to send data frame */
2557 dhd_htsf_addtxts(dhdp, pktbuf);
2560 #ifdef PROP_TXSTATUS
/* wlfc first; if unsupported fall back to a direct bus transmit. */
2562 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
2563 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
2564 /* non-proptxstatus way */
2566 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2568 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2569 #endif /* BCMPCIE */
2574 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2576 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2577 #endif /* BCMPCIE */
2578 #endif /* PROP_TXSTATUS */
/*
 * dhd_start_xmit() - Linux net_device transmit entry point (ndo_start_xmit).
 *
 * Validates bus/interface state, re-aligns and re-headrooms the skb as
 * needed, converts it to a native packet, runs optional WMF / TCP-ACK
 * suppression processing, then forwards to dhd_sendpkt(). Always consumes
 * the skb on the normal path ("we always eat the packet"); returns
 * NETDEV_TX_BUSY only on the early bus-down / bad-interface rejections.
 *
 * NOTE(review): many physical lines are elided in this excerpt; comments
 * describe only the visible statements.
 */
2584 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
2589 dhd_info_t *dhd = DHD_DEV_INFO(net);
2590 dhd_if_t *ifp = NULL;
2593 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
2595 uint8 htsfdlystat_sz = 0;
2598 struct ether_header *eh;
2600 #endif /* DHD_WMF */
2602 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2604 DHD_OS_WAKE_LOCK(&dhd->pub);
2605 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2607 /* Reject if down */
2608 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
2609 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2610 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
2611 netif_stop_queue(net);
2612 /* Send Event when bus down detected during data session */
2614 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
2615 net_os_send_hang_message(net);
2617 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2618 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2619 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2622 return NETDEV_TX_BUSY;
2626 ifp = DHD_DEV_IFP(net);
2627 ifidx = DHD_DEV_IFIDX(net);
2629 ASSERT(ifidx == dhd_net2idx(dhd, net));
2630 ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
2632 if (ifidx == DHD_BAD_IF) {
2633 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
2634 netif_stop_queue(net);
2635 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2636 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2637 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2640 return NETDEV_TX_BUSY;
2644 /* re-align socket buffer if "skb->data" is odd address */
2645 if (((unsigned long)(skb->data)) & 0x1) {
2646 unsigned char *data = skb->data;
2647 uint32 length = skb->len;
/* Shift payload down one byte to restore 2-byte alignment. */
2648 PKTPUSH(dhd->pub.osh, skb, 1);
2649 memmove(skb->data, data, length);
2650 PKTSETLEN(dhd->pub.osh, skb, length);
2653 datalen = PKTLEN(dhd->pub.osh, skb);
2655 /* Make sure there's enough room for any header */
2657 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
2658 struct sk_buff *skb2;
2660 DHD_INFO(("%s: insufficient headroom\n",
2661 dhd_ifname(&dhd->pub, ifidx)));
2662 dhd->pub.tx_realloc++;
2664 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
2667 if ((skb = skb2) == NULL) {
2668 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2669 dhd_ifname(&dhd->pub, ifidx)));
2675 /* Convert to packet */
2676 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
2677 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2678 dhd_ifname(&dhd->pub, ifidx)));
2679 dev_kfree_skb_any(skb);
/* HTSF timestamping: retag unicast IPv4 frames for delay-stats capture. */
2684 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
2685 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
2686 struct ether_header *eh = (struct ether_header *)pktdata;
2688 if (!ETHER_ISMULTI(eh->ether_dhost) &&
2689 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
2690 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
2695 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
2696 iph = (uint8 *)eh + ETHER_HDR_LEN;
2698 /* WMF processing for multicast packets
2699 * Only IPv4 packets are handled
2701 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
2702 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
2703 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
2704 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2706 bool ucast_convert = FALSE;
2707 #ifdef DHD_UCAST_UPNP
2710 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
2711 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
2712 #endif /* DHD_UCAST_UPNP */
2713 #ifdef DHD_IGMP_UCQUERY
2714 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
2715 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
2716 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
2717 #endif /* DHD_IGMP_UCQUERY */
2718 if (ucast_convert) {
2720 unsigned long flags;
2722 DHD_IF_STA_LIST_LOCK(ifp, flags);
2724 /* Convert upnp/igmp query to unicast for each assoc STA */
2725 list_for_each_entry(sta, &ifp->sta_list, list) {
2726 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
/* Clone failed: drop locks/wakelock and bail out of the loop. */
2727 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2728 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2729 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2732 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
2735 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2736 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2737 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Original packet freed after per-STA clones were forwarded. */
2739 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2740 return NETDEV_TX_OK;
2742 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2744 /* There will be no STA info if the packet is coming from LAN host
2747 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
2751 /* Either taken by WMF or we should drop it.
2754 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2755 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2756 return NETDEV_TX_OK;
2758 /* Continue the transmit path */
2763 #endif /* DHD_WMF */
2765 #ifdef DHDTCPACK_SUPPRESS
2766 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
2767 /* If this packet has been hold or got freed, just return */
2768 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx))
2771 /* If this packet has replaced another packet and got freed, just return */
2772 if (dhd_tcpack_suppress(&dhd->pub, pktbuf))
2775 #endif /* DHDTCPACK_SUPPRESS */
2777 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
2781 ifp->stats.tx_dropped++;
2782 dhd->pub.tx_dropped++;
2786 #ifdef PROP_TXSTATUS
2787 /* tx_packets counter can counted only when wlfc is disabled */
2788 if (!dhd_wlfc_is_supported(&dhd->pub))
2791 dhd->pub.tx_packets++;
2792 ifp->stats.tx_packets++;
2793 ifp->stats.tx_bytes += datalen;
2797 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2798 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2800 /* Return ok: we always eat the packet */
2801 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2804 return NETDEV_TX_OK;
/*
 * dhd_txflowcontrol() - start/stop Linux TX queues for one or all
 * interfaces.
 *
 * @ifidx: interface index, or ALL_INTERFACES to apply to every active one.
 * @state: flow-control state; queues are stopped when on, woken otherwise
 *         (the condition lines are elided in this excerpt).
 */
2810 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
2812 struct net_device *net;
2813 dhd_info_t *dhd = dhdp->info;
2816 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2820 if (ifidx == ALL_INTERFACES) {
2821 /* Flow control on all active interfaces */
2822 dhdp->txoff = state;
2823 for (i = 0; i < DHD_MAX_IFS; i++) {
2824 if (dhd->iflist[i]) {
2825 net = dhd->iflist[i]->net;
2827 netif_stop_queue(net);
2829 netif_wake_queue(net);
/* Single-interface case. */
2834 if (dhd->iflist[ifidx]) {
2835 net = dhd->iflist[ifidx]->net;
2837 netif_stop_queue(net);
2839 netif_wake_queue(net);
/* dhd_is_rxthread_enabled() - report whether the dedicated RX thread is in use. */
2847 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
2849 dhd_info_t *dhd = dhdp->info;
2851 return dhd->rxthread_enabled;
2853 #endif /* DHD_WMF */
/*
 * dhd_rx_frame() - receive path: deliver a chain of packets from the bus
 * layer up to the Linux network stack.
 *
 * Walks the PKTNEXT-linked chain (up to numpkt packets), per packet:
 * drops frames for unregistered interfaces, handles optional BT-AMP /
 * wlfc / L2-filter / WMF processing, (PCIe full-dongle AP mode) performs
 * intra-BSS forwarding, converts to an skb, dispatches Broadcom event
 * packets to dhd_wl_host_event(), updates counters, and hands normal
 * frames to the stack (in-interrupt vs. rxf-thread vs. netif_rx paths).
 * Finally arms RX/CTRL wake-lock timeouts.
 *
 * NOTE(review): many physical lines are elided in this excerpt
 * (braces, netif_rx calls, continue statements); comments describe only
 * the visible statements.
 */
2856 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
2858 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2859 struct sk_buff *skb;
2862 void *data, *pnext = NULL;
2865 wl_event_msg_t event;
2868 void *skbhead = NULL;
2869 void *skbprev = NULL;
2870 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2873 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2875 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Iterate over the packet chain; unlink each packet before processing. */
2877 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
2878 struct ether_header *eh;
2880 struct dot11_llc_snap_header *lsh;
2883 pnext = PKTNEXT(dhdp->osh, pktbuf);
2884 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
2886 ifp = dhd->iflist[ifidx];
2888 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
2890 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2894 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2896 /* Dropping only data packets before registering net device to avoid kernel panic */
2897 #ifndef PROP_TXSTATUS_VSDB
2898 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
2899 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2901 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
2902 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2903 #endif /* PROP_TXSTATUS_VSDB */
2905 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
2907 PKTCFREE(dhdp->osh, pktbuf, FALSE);
/* BT-AMP: detect an 802.3+LLC/SNAP L2CAP frame just past the Ethernet header. */
2912 lsh = (struct dot11_llc_snap_header *)&eh[1];
2914 if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
2915 (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
2916 bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
2917 lsh->type == HTON16(BTA_PROT_L2CAP)) {
2918 amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
2919 ((uint8 *)eh + RFC1042_HDR_LEN);
2922 #endif /* WLBTAMP */
2924 #ifdef PROP_TXSTATUS
2925 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
2926 /* WLFC may send header only packet when
2927 there is an urgent message but no packet to
2930 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2934 #ifdef DHD_L2_FILTER
2935 /* If block_ping is enabled drop the ping packet */
2936 if (dhdp->block_ping) {
2937 if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
2938 PKTFREE(dhdp->osh, pktbuf, FALSE);
2944 /* WMF processing for multicast packets */
2945 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
2949 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
2950 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
2953 /* The packet is taken by WMF. Continue to next iteration */
2956 /* Packet DROP decision by WMF. Toss it */
2957 DHD_ERROR(("%s: WMF decides to drop packet\n",
2959 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2962 /* Continue the transmit path */
2966 #endif /* DHD_WMF */
2967 #ifdef DHDTCPACK_SUPPRESS
2968 dhd_tcpdata_info_get(dhdp, pktbuf);
2970 skb = PKTTONATIVE(dhdp->osh, pktbuf);
2972 ifp = dhd->iflist[ifidx];
/* Fall back to the primary interface if ifp lookup failed (guard elided). */
2974 ifp = dhd->iflist[0];
2977 skb->dev = ifp->net;
2979 #ifdef PCIE_FULL_DONGLE
/* AP/P2P-GO without client isolation: forward intra-BSS traffic in the host. */
2980 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
2981 (!ifp->ap_isolate)) {
2982 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2983 if (ETHER_ISUCAST(eh->ether_dhost)) {
/* Unicast to an associated STA: loop it back out, skip local delivery. */
2984 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
2985 dhd_sendpkt(dhdp, ifidx, pktbuf);
/* Multicast/broadcast: duplicate so it is both forwarded and delivered. */
2989 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
2990 dhd_sendpkt(dhdp, ifidx, npktbuf);
2993 #endif /* PCIE_FULL_DONGLE */
2995 /* Get the protocol, maintain skb around eth_type_trans()
2996 * The main reason for this hack is for the limitation of
2997 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
2998 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
2999 * coping of the packet coming from the network stack to add
3000 * BDC, Hardware header etc, during network interface registration
3001 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
3002 * for BDC, Hardware header etc. and not just the ETH_HLEN
3007 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3008 dump_data = skb->data;
3009 protocol = (dump_data[12] << 8) | dump_data[13];
3011 if (protocol == ETHER_TYPE_802_1X) {
3012 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3013 "ver %d, type %d, replay %d\n",
3014 dump_data[14], dump_data[15],
3017 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3018 #if defined(DHD_RX_DUMP)
3019 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
3020 if (protocol != ETHER_TYPE_BRCM) {
3021 if (dump_data[0] == 0xFF) {
3022 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
3024 if ((dump_data[12] == 8) &&
3025 (dump_data[13] == 6)) {
3026 DHD_ERROR(("%s: ARP %d\n",
3027 __FUNCTION__, dump_data[0x15]));
3029 } else if (dump_data[0] & 1) {
3030 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
3031 __FUNCTION__, MAC2STRDBG(dump_data)));
3033 #ifdef DHD_RX_FULL_DUMP
3036 for (k = 0; k < skb->len; k++) {
3037 DHD_ERROR(("%02X ", dump_data[k]));
3043 #endif /* DHD_RX_FULL_DUMP */
3045 #endif /* DHD_RX_DUMP */
3047 skb->protocol = eth_type_trans(skb, skb->dev);
3049 if (skb->pkt_type == PACKET_MULTICAST) {
3050 dhd->pub.rx_multicast++;
3051 ifp->stats.multicast++;
3058 dhd_htsf_addrxts(dhdp, pktbuf);
3060 /* Strip header, count, deliver upward */
3061 skb_pull(skb, ETH_HLEN);
3063 /* Process special event packets and then discard them */
3064 memset(&event, 0, sizeof(event));
3065 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
3066 dhd_wl_host_event(dhd, &ifidx,
3067 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3068 skb_mac_header(skb),
3071 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3075 wl_event_to_host_order(&event);
3077 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
3079 if (event.event_type == WLC_E_BTA_HCI_EVENT) {
3080 dhd_bta_doevt(dhdp, data, event.datalen);
3082 #endif /* WLBTAMP */
3084 #if defined(PNO_SUPPORT)
3085 if (event.event_type == WLC_E_PFN_NET_FOUND) {
3086 /* enforce custom wake lock to garantee that Kernel not suspended */
3087 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
3089 #endif /* PNO_SUPPORT */
3091 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3092 PKTFREE(dhdp->osh, pktbuf, FALSE);
3094 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3096 tout_rx = DHD_PACKET_TIMEOUT_MS;
3098 #ifdef PROP_TXSTATUS
3099 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
3100 #endif /* PROP_TXSTATUS */
/* ifidx may have been rewritten by the event handler; re-resolve ifp. */
3103 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
3104 ifp = dhd->iflist[ifidx];
3107 ifp->net->last_rx = jiffies;
3109 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
3110 dhdp->dstats.rx_bytes += skb->len;
3111 dhdp->rx_packets++; /* Local count */
3112 ifp->stats.rx_bytes += skb->len;
3113 ifp->stats.rx_packets++;
3115 #if defined(DHD_TCP_WINSIZE_ADJUST)
3116 if (dhd_use_tcp_window_size_adjust) {
3117 if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
3118 dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
3121 #endif /* DHD_TCP_WINSIZE_ADJUST */
/* In IRQ context: either chain skbs for the rxf thread or netif_rx (elided). */
3123 if (in_interrupt()) {
3126 if (dhd->rxthread_enabled) {
3130 PKTSETNEXT(dhdp->osh, skbprev, skb);
3134 /* If the receive is not processed inside an ISR,
3135 * the softirqd must be woken explicitly to service
3136 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3137 * by netif_rx_ni(), but in earlier kernels, we need
3138 * to do it manually.
3140 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3145 local_irq_save(flags);
3147 local_irq_restore(flags);
3148 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
/* Hand any accumulated skb chain to the rxf thread in one batch. */
3153 if (dhd->rxthread_enabled && skbhead)
3154 dhd_sched_rxf(dhdp, skbhead);
3156 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
3157 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
/*
 * dhd_event() - OS hook invoked when a dongle event arrives.
 * No-op on Linux; events are handled elsewhere in this port.
 */
3161 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
3163 /* Linux version has nothing to do */
/*
 * dhd_txcomplete() - per-packet TX completion callback from the bus layer.
 *
 * Pulls the protocol header back off, decrements the pending-EAPOL counter
 * for 802.1X frames, generates a local BT-AMP HCI completion when the frame
 * was L2CAP data (WLBTAMP), and — when wlfc is active — performs the TX
 * packet/byte/drop accounting that dhd_start_xmit() skipped.
 *
 * @success: whether the bus reports the packet as transmitted.
 */
3168 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
3170 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3171 struct ether_header *eh;
3177 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
3179 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
3180 type = ntoh16(eh->ether_type);
3182 if (type == ETHER_TYPE_802_1X)
/* Balances the atomic_inc done in dhd_sendpkt() for EAPOL frames. */
3183 atomic_dec(&dhd->pend_8021x_cnt);
3186 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
3187 * If yes generate packet completion event.
3189 len = PKTLEN(dhdp->osh, txp);
3191 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
3192 if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
3193 struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
3195 if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
3196 ntoh16(lsh->type) == BTA_PROT_L2CAP) {
3198 dhd_bta_tx_hcidata_complete(dhdp, txp, success);
3201 #endif /* WLBTAMP */
3202 #ifdef PROP_TXSTATUS
3203 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
3204 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
3205 uint datalen = PKTLEN(dhd->pub.osh, txp);
/* Success/failure branch condition elided in this excerpt. */
3208 dhd->pub.tx_packets++;
3209 ifp->stats.tx_packets++;
3210 ifp->stats.tx_bytes += datalen;
3212 ifp->stats.tx_dropped++;
/*
 * dhd_get_stats() - net_device ndo_get_stats handler.
 *
 * Maps the net_device back to its DHD interface, zeroes the stats on a bad
 * interface index, and asks the protocol layer to refresh dongle-side
 * counters. (The return statement is elided in this excerpt.)
 */
3218 static struct net_device_stats *
3219 dhd_get_stats(struct net_device *net)
3221 dhd_info_t *dhd = DHD_DEV_INFO(net);
3225 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3227 ifidx = dhd_net2idx(dhd, net);
3228 if (ifidx == DHD_BAD_IF) {
3229 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
3231 memset(&net->stats, 0, sizeof(net->stats));
3235 ifp = dhd->iflist[ifidx];
3239 /* Use the protocol to get dongle stats */
3240 dhd_prot_dstats(&dhd->pub);
/*
 * dhd_watchdog_thread() - dedicated kernel thread driving the bus watchdog.
 *
 * Optionally raises itself to SCHED_FIFO (dhd_watchdog_prio), then loops:
 * wait on the task-control semaphore, exit when terminated, run
 * dhd_bus_watchdog() unless the dongle is in reset, and re-arm the
 * watchdog timer compensating for the time the tick itself took.
 */
3246 dhd_watchdog_thread(void *data)
3248 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3249 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3250 /* This thread doesn't need any user-level access,
3251 * so get rid of all our resources
3253 if (dhd_watchdog_prio > 0) {
3254 struct sched_param param;
/* Clamp the requested priority into the valid RT range. */
3255 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
3256 dhd_watchdog_prio:(MAX_RT_PRIO-1);
3257 setScheduler(current, SCHED_FIFO, &param);
3261 if (down_interruptible (&tsk->sema) == 0) {
3262 unsigned long flags;
3263 unsigned long jiffies_at_start = jiffies;
3264 unsigned long time_lapse;
3266 SMP_RD_BARRIER_DEPENDS();
3267 if (tsk->terminated) {
3271 if (dhd->pub.dongle_reset == FALSE) {
3272 DHD_TIMER(("%s:\n", __FUNCTION__));
3274 /* Call the bus module watchdog */
3275 dhd_bus_watchdog(&dhd->pub);
3278 DHD_GENERAL_LOCK(&dhd->pub, flags);
3279 /* Count the tick for reference */
3281 time_lapse = jiffies - jiffies_at_start;
3283 /* Reschedule the watchdog */
3284 if (dhd->wd_timer_valid)
3285 mod_timer(&dhd->timer,
/* Subtract the time this tick consumed so the period stays steady. */
3287 msecs_to_jiffies(dhd_watchdog_ms) -
3288 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
3289 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3295 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_watchdog() - kernel timer callback for the DHD watchdog.
 *
 * If a watchdog thread exists, just wake it via its semaphore; otherwise
 * run dhd_bus_watchdog() inline and re-arm the timer. Skips everything
 * while the dongle is held in reset.
 */
3298 static void dhd_watchdog(ulong data)
3300 dhd_info_t *dhd = (dhd_info_t *)data;
3301 unsigned long flags;
3303 if (dhd->pub.dongle_reset) {
/* Thread mode: defer the real work to dhd_watchdog_thread(). */
3307 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
3308 up(&dhd->thr_wdt_ctl.sema);
3312 /* Call the bus module watchdog */
3313 dhd_bus_watchdog(&dhd->pub);
3315 DHD_GENERAL_LOCK(&dhd->pub, flags);
3316 /* Count the tick for reference */
3319 /* Reschedule the watchdog */
3320 if (dhd->wd_timer_valid)
3321 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
3322 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3326 #ifdef ENABLE_ADAPTIVE_SCHED
/*
 * dhd_sched_policy() - adapt the current thread's scheduling class to CPU
 * frequency.
 *
 * Below CUSTOM_CPUFREQ_THRESH the thread is demoted to SCHED_NORMAL;
 * otherwise it is (re)promoted to SCHED_FIFO at @prio, clamped to the
 * valid RT priority range. Intended for the DPC/RXF worker threads.
 */
3328 dhd_sched_policy(int prio)
3330 struct sched_param param;
3331 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
3332 param.sched_priority = 0;
3333 setScheduler(current, SCHED_NORMAL, &param);
/* Only upgrade if not already running SCHED_FIFO. */
3335 if (get_scheduler_policy(current) != SCHED_FIFO) {
3336 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
3337 setScheduler(current, SCHED_FIFO, &param);
3341 #endif /* ENABLE_ADAPTIVE_SCHED */
3342 #ifdef DEBUG_CPU_FREQ
/*
 * dhd_cpufreq_notifier() - cpufreq transition notifier (debug only).
 * On CPUFREQ_POSTCHANGE, logs the new frequency and records it in the
 * per-CPU new_freq tracking variable.
 */
3343 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3345 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3346 struct cpufreq_freqs *freq = data;
3350 if (val == CPUFREQ_POSTCHANGE) {
3351 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3352 freq->new, freq->cpu));
3353 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3359 #endif /* DEBUG_CPU_FREQ */
/*
 * dhd_dpc_thread() - dedicated kernel thread running the bus DPC
 * (deferred procedure call) loop.
 *
 * Optionally runs SCHED_FIFO (dhd_dpc_prio), can be pinned to a CPU core
 * (CUSTOM_DPC_CPUCORE or config.txt), then loops on the binary semaphore:
 * while the bus is up it drains dhd_bus_dpc(); on bus-down it performs a
 * clean dhd_bus_stop(). Releases the wake lock after each pass.
 */
3361 dhd_dpc_thread(void *data)
3363 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3364 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3366 /* This thread doesn't need any user-level access,
3367 * so get rid of all our resources
3369 if (dhd_dpc_prio > 0)
3371 struct sched_param param;
3372 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
3373 setScheduler(current, SCHED_FIFO, &param);
3376 #ifdef CUSTOM_DPC_CPUCORE
3377 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
3379 if (dhd->pub.conf->dpc_cpucore >= 0) {
3380 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
3381 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
3384 #ifdef CUSTOM_SET_CPUCORE
3385 dhd->pub.current_dpc = current;
3386 #endif /* CUSTOM_SET_CPUCORE */
3387 /* Run until signal received */
3389 if (!binary_sema_down(tsk)) {
3390 #ifdef ENABLE_ADAPTIVE_SCHED
3391 dhd_sched_policy(dhd_dpc_prio);
3392 #endif /* ENABLE_ADAPTIVE_SCHED */
3393 SMP_RD_BARRIER_DEPENDS();
3394 if (tsk->terminated) {
3398 /* Call bus dpc unless it indicated down (then clean stop) */
3399 if (dhd->pub.busstate != DHD_BUS_DOWN) {
/* Hold off the watchdog while actively draining the bus. */
3400 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
3401 while (dhd_bus_dpc(dhd->pub.bus)) {
3402 /* process all data */
3404 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
3405 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Bus reported down: stop it cleanly. */
3409 dhd_bus_stop(dhd->pub.bus, TRUE);
3410 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3416 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_rxf_thread() - dedicated kernel thread delivering received skbs to
 * the network stack.
 *
 * Optionally runs SCHED_FIFO (dhd_rxf_prio) and can be pinned via
 * CUSTOM_SET_CPUCORE. Waits on the task semaphore, dequeues skb chains
 * from the rxf queue, unlinks each skb and passes it up (the netif_rx
 * call itself is elided in this excerpt), and drops the wake lock after
 * each batch. WAIT_DEQUEUE adds a periodic yield every
 * RXF_WATCHDOG_TIME ms.
 */
3420 dhd_rxf_thread(void *data)
3422 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3423 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3424 #if defined(WAIT_DEQUEUE)
3425 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
3426 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
3428 dhd_pub_t *pub = &dhd->pub;
3430 /* This thread doesn't need any user-level access,
3431 * so get rid of all our resources
3433 if (dhd_rxf_prio > 0)
3435 struct sched_param param;
3436 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
3437 setScheduler(current, SCHED_FIFO, &param);
3440 DAEMONIZE("dhd_rxf");
3441 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
3443 /* signal: thread has started */
3444 complete(&tsk->completed);
3445 #ifdef CUSTOM_SET_CPUCORE
3446 dhd->pub.current_rxf = current;
3447 #endif /* CUSTOM_SET_CPUCORE */
3448 /* Run until signal received */
3450 if (down_interruptible(&tsk->sema) == 0) {
3452 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
3455 #ifdef ENABLE_ADAPTIVE_SCHED
3456 dhd_sched_policy(dhd_rxf_prio);
3457 #endif /* ENABLE_ADAPTIVE_SCHED */
3459 SMP_RD_BARRIER_DEPENDS();
3461 if (tsk->terminated) {
3464 skb = dhd_rxf_dequeue(pub);
/* Walk the dequeued chain, detaching each skb before delivery. */
3470 void *skbnext = PKTNEXT(pub->osh, skb);
3471 PKTSETNEXT(pub->osh, skb, NULL);
3473 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3477 local_irq_save(flags);
3479 local_irq_restore(flags);
3484 #if defined(WAIT_DEQUEUE)
3485 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
3487 watchdogTime = OSL_SYSUPTIME();
3491 DHD_OS_WAKE_UNLOCK(pub);
3496 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_dpc_kill() - stop DPC processing by killing the DPC tasklet.
 * (Guard checks between the signature and tasklet_kill are elided in
 * this excerpt.)
 */
3500 void dhd_dpc_kill(dhd_pub_t *dhdp)
3512 tasklet_kill(&dhd->tasklet);
3513 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
3515 #endif /* BCMPCIE */
/*
 * Tasklet body for DPC processing (function signature elided in this
 * excerpt; the tasklet data pointer is the dhd_info_t). While the bus is
 * up, reschedules itself as long as dhd_bus_dpc() reports more work;
 * otherwise stops the bus cleanly. Releases the wake lock taken by
 * dhd_sched_dpc() when processing completes.
 */
3522 dhd = (dhd_info_t *)data;
3524 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3525 * down below , wake lock is set,
3526 * the tasklet is initialized in dhd_attach()
3528 /* Call bus dpc unless it indicated down (then clean stop) */
3529 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3530 if (dhd_bus_dpc(dhd->pub.bus))
3531 tasklet_schedule(&dhd->tasklet);
3533 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3535 dhd_bus_stop(dhd->pub.bus, TRUE);
3536 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * dhd_sched_dpc() - schedule DPC processing (thread or tasklet mode).
 *
 * Takes the OS wake lock, then either wakes the DPC thread via its binary
 * semaphore (releasing the lock again if the semaphore was already up) or
 * schedules the DPC tasklet.
 */
3541 dhd_sched_dpc(dhd_pub_t *dhdp)
3543 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3545 DHD_OS_WAKE_LOCK(dhdp);
3546 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
3547 /* If the semaphore does not get up,
3548 * wake unlock should be done here
3550 if (!binary_sema_up(&dhd->thr_dpc_ctl))
3551 DHD_OS_WAKE_UNLOCK(dhdp);
/* Tasklet mode: no dedicated DPC thread. */
3554 tasklet_schedule(&dhd->tasklet);
/*
 * dhd_sched_rxf() - enqueue a received skb (chain) for the RXF thread.
 *
 * Takes the OS wake lock, enqueues the skb into the rxf queue, and wakes
 * the RXF thread. With RXF_DEQUEUE_ON_BUSY, retries a busy queue with
 * short sleeps and, if still busy, falls back to delivering the chain to
 * the kernel directly (delivery call elided in this excerpt).
 */
3559 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
3561 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3562 #ifdef RXF_DEQUEUE_ON_BUSY
3565 #endif /* RXF_DEQUEUE_ON_BUSY */
3567 DHD_OS_WAKE_LOCK(dhdp);
3569 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
3570 #ifdef RXF_DEQUEUE_ON_BUSY
3572 ret = dhd_rxf_enqueue(dhdp, skb);
3573 if (ret == BCME_OK || ret == BCME_ERROR)
3576 OSL_SLEEP(50); /* waiting for dequeueing */
3577 } while (retry-- > 0);
3579 if (retry <= 0 && ret == BCME_BUSY) {
/* Queue stayed busy: walk the chain and hand it to the kernel ourselves. */
3583 void *skbnext = PKTNEXT(dhdp->osh, skbp);
3584 PKTSETNEXT(dhdp->osh, skbp, NULL);
3588 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
3591 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3592 up(&dhd->thr_rxf_ctl.sema);
3595 #else /* RXF_DEQUEUE_ON_BUSY */
3597 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
3600 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3601 up(&dhd->thr_rxf_ctl.sema);
3604 #endif /* RXF_DEQUEUE_ON_BUSY */
3608 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
/*
 * dhd_toe_get() - query the dongle's "toe_ol" (TCP offload engine) bitmap.
 *
 * @toe_ol: out parameter receiving the bitmap on success.
 * Issues a WLC_GET_VAR ioctl; older firmware without toe support is
 * reported distinctly from other ioctl failures.
 */
3610 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
3616 memset(&ioc, 0, sizeof(ioc));
3618 ioc.cmd = WLC_GET_VAR;
3620 ioc.len = (uint)sizeof(buf);
3623 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3624 buf[sizeof(buf) - 1] = '\0';
3625 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3626 /* Check for older dongle image that doesn't support toe_ol */
3628 DHD_ERROR(("%s: toe not supported by device\n",
3629 dhd_ifname(&dhd->pub, ifidx)));
3633 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3637 memcpy(toe_ol, buf, sizeof(uint32));
3641 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
/*
 * dhd_toe_set() - program the dongle's TOE component bitmap and the global
 * "toe" enable.
 *
 * First sets the "toe_ol" iovar to @toe_ol via WLC_SET_VAR, then sets the
 * global "toe" flag on iff any component bit is enabled.
 */
3643 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
3649 memset(&ioc, 0, sizeof(ioc));
3651 ioc.cmd = WLC_SET_VAR;
3653 ioc.len = (uint)sizeof(buf);
3656 /* Set toe_ol as requested */
3658 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3659 buf[sizeof(buf) - 1] = '\0';
/* iovar value follows the NUL-terminated name in the buffer. */
3660 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
3662 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3663 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3664 dhd_ifname(&dhd->pub, ifidx), ret));
3668 /* Enable toe globally only if any components are enabled. */
3670 toe = (toe_ol != 0);
3673 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
3675 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3676 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3684 #if defined(WL_CFG80211)
/*
 * dhd_set_scb_probe() - raise the SCB (station control block) probe count
 * to NUM_SCB_MAX_PROBE in hostap mode.
 *
 * Reads the current "scb_probe" iovar, patches scb_max_probe, and writes
 * it back; only meaningful when the driver runs in DHD_FLAG_HOSTAP_MODE
 * (the non-hostap early return is elided in this excerpt).
 */
3685 void dhd_set_scb_probe(dhd_pub_t *dhd)
3687 #define NUM_SCB_MAX_PROBE 3
3689 wl_scb_probe_t scb_probe;
3690 char iovbuf[WL_EVENTING_MASK_LEN + 12];
3692 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
3694 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
3697 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
3699 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3700 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
3702 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
3704 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
3706 bcm_mkiovar("scb_probe", (char *)&scb_probe,
3707 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
3708 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3709 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
3710 #undef NUM_SCB_MAX_PROBE
3713 #endif /* WL_CFG80211 */
3715 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
 * dhd_ethtool_get_drvinfo() - ethtool .get_drvinfo callback: report driver
 * name "wl" and the DHD driver version.
 */
3717 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
3719 dhd_info_t *dhd = DHD_DEV_INFO(net);
3721 snprintf(info->driver, sizeof(info->driver), "wl");
3722 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
/* Ethtool ops table registered on the net_device. */
3725 struct ethtool_ops dhd_ethtool_ops = {
3726 .get_drvinfo = dhd_ethtool_get_drvinfo
3728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
3731 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * dhd_ethtool() - legacy ethtool ioctl handler (pre-ethtool_ops path).
 *
 * Dispatches on the ethtool command word copied from user space:
 *  - ETHTOOL_GDRVINFO: report driver name/version ("dhd" when the caller
 *    probes with "?dhd", otherwise "wl"/"xx" depending on dongle type);
 *  - ETHTOOL_GRXCSUM/GTXCSUM: read the TOE bitmap and report the relevant
 *    checksum-offload bit;
 *  - ETHTOOL_SRXCSUM/STXCSUM: read-modify-write the TOE bitmap and mirror
 *    the TX checksum state into net->features (NETIF_F_IP_CSUM).
 *
 * @uaddr: user-space pointer to the ethtool request/response struct.
 */
3733 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
3735 struct ethtool_drvinfo info;
3736 char drvname[sizeof(info.driver)];
3739 struct ethtool_value edata;
3740 uint32 toe_cmpnt, csum_dir;
3744 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3746 /* all ethtool calls start with a cmd word */
3747 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
3751 case ETHTOOL_GDRVINFO:
3752 /* Copy out any request driver name */
3753 if (copy_from_user(&info, uaddr, sizeof(info)))
3755 strncpy(drvname, info.driver, sizeof(info.driver));
3756 drvname[sizeof(info.driver)-1] = '\0';
3758 /* clear struct for return */
3759 memset(&info, 0, sizeof(info));
3762 /* if dhd requested, identify ourselves */
3763 if (strcmp(drvname, "?dhd") == 0) {
3764 snprintf(info.driver, sizeof(info.driver), "dhd");
3765 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
3766 info.version[sizeof(info.version) - 1] = '\0';
3769 /* otherwise, require dongle to be up */
3770 else if (!dhd->pub.up) {
3771 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
3775 /* finally, report dongle driver type */
3776 else if (dhd->pub.iswl)
3777 snprintf(info.driver, sizeof(info.driver), "wl");
3779 snprintf(info.driver, sizeof(info.driver), "xx");
3781 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
3782 if (copy_to_user(uaddr, &info, sizeof(info)))
3784 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
3785 (int)sizeof(drvname), drvname, info.driver));
3789 /* Get toe offload components from dongle */
3790 case ETHTOOL_GRXCSUM:
3791 case ETHTOOL_GTXCSUM:
3792 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3795 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3798 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
3800 if (copy_to_user(uaddr, &edata, sizeof(edata)))
3804 /* Set toe offload components in dongle */
3805 case ETHTOOL_SRXCSUM:
3806 case ETHTOOL_STXCSUM:
3807 if (copy_from_user(&edata, uaddr, sizeof(edata)))
3810 /* Read the current settings, update and write back */
3811 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3814 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3816 if (edata.data != 0)
3817 toe_cmpnt |= csum_dir;
3819 toe_cmpnt &= ~csum_dir;
3821 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
3824 /* If setting TX checksum mode, tell Linux the new mode */
3825 if (cmd == ETHTOOL_STXCSUM) {
3827 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
3829 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
3841 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/*
 * dhd_check_hang() - decide whether an ioctl error indicates a hung
 * dongle, and if so send the HANG event up to user space.
 *
 * Triggers on -ETIMEDOUT / -EREMOTEIO errors (on UNIVERSAL5433 only for
 * hardware revisions that support it) or on bus-down without a pending
 * dongle reset. Skipped while the DPC thread is being torn down.
 */
3843 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
3848 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3855 dhd = (dhd_info_t *)dhdp->info;
3856 #if !defined(BCMPCIE)
3857 if (dhd->thr_dpc_ctl.thr_pid < 0) {
3858 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
3863 #ifdef CONFIG_MACH_UNIVERSAL5433
3864 /* old revision does not send hang message */
3865 if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
3867 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
3868 #endif /* CONFIG_MACH_UNIVERSAL5433 */
3869 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
3870 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
3871 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
3872 net_os_send_hang_message(net);
/*
 * dhd_ioctl_process() - dispatch an already-copied-in ioctl request either to
 * the local DHD ioctl handler (ioc->driver == DHD_IOCTL_MAGIC) or down to the
 * dongle via dhd_wl_ioctl().
 *
 * @pub:      driver public context
 * @ifidx:    interface index the ioctl targets
 * @ioc:      ioctl control structure (cmd, buf, len, set, driver, ...)
 * @data_buf: kernel copy of the user payload (may be NULL)
 *
 * Returns a BCME_* error code. NOTE(review): lines are elided in this region
 * (non-contiguous numbering), so several error/cleanup paths and the closing
 * of the function are not visible — verify against the full source.
 */
3878 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
3880 int bcmerror = BCME_OK;
3882 struct net_device *net;
3884 net = dhd_idx2net(pub, ifidx);
3886 bcmerror = BCME_BADARG;
/* Clamp payload length to the driver maximum before any handler sees it. */
3891 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
3893 /* check for local dhd ioctl and handle it */
3894 if (ioc->driver == DHD_IOCTL_MAGIC) {
3895 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
3897 pub->bcmerror = bcmerror;
3901 /* send to dongle (must be up, and wl). */
3902 if (pub->busstate != DHD_BUS_DATA) {
3903 bcmerror = BCME_DONGLE_DOWN;
3908 bcmerror = BCME_DONGLE_DOWN;
/*
3913 * Flush the TX queue if required for proper message serialization:
3914 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
3915 * prevent M4 encryption and
3916 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
3917 * prevent disassoc frame being sent before WPS-DONE frame.
 */
3919 if (ioc->cmd == WLC_SET_KEY ||
3920 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3921 strncmp("wsec_key", data_buf, 9) == 0) ||
3922 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3923 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
3924 ioc->cmd == WLC_DISASSOC)
3925 dhd_wait_pend8021x(net);
/* WLMEDIA_HTSF debug shortcuts: handle timestamp-related commands locally
 * without forwarding to the dongle (presumably guarded by #ifdef WLMEDIA_HTSF
 * in the full source — the guard lines are elided here).
 */
3929 /* short cut wl ioctl calls here */
3930 if (strcmp("htsf", data_buf) == 0) {
3931 dhd_ioctl_htsf_get(dhd, 0);
3935 if (strcmp("htsflate", data_buf) == 0) {
3937 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
3938 memset(&maxdelayts, 0, sizeof(tstamp_t));
3942 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3943 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3944 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3945 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3951 if (strcmp("htsfclear", data_buf) == 0) {
3952 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3953 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3954 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3955 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3959 if (strcmp("htsfhis", data_buf) == 0) {
3960 dhd_dump_htsfhisto(&vi_d1, "H to D");
3961 dhd_dump_htsfhisto(&vi_d2, "D to D");
3962 dhd_dump_htsfhisto(&vi_d3, "D to H");
3963 dhd_dump_htsfhisto(&vi_d4, "H to H");
3966 if (strcmp("tsport", data_buf) == 0) {
3968 memcpy(&tsport, data_buf + 7, 4);
3970 DHD_ERROR(("current timestamp port: %d \n", tsport));
3975 #endif /* WLMEDIA_HTSF */
/* "rpc_" prefixed iovars are routed through the FD aggregation path when
 * available; otherwise they are rejected as unsupported.
 */
3977 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
3978 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
3980 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
3982 bcmerror = BCME_UNSUPPORTED;
3986 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
/* On failure, dhd_check_hang() decides whether to escalate to a HANG event. */
3989 dhd_check_hang(net, pub, bcmerror);
/*
 * dhd_ioctl_entry() - netdev ndo_do_ioctl handler for DHD interfaces.
 *
 * Routes the request by cmd: wireless extensions (SIOCIWFIRST..SIOCIWLAST)
 * to wl_iw_ioctl(), SIOCETHTOOL to dhd_ethtool(), Android private commands
 * (SIOCDEVPRIVATE+1) to wl_android_priv_cmd(), and SIOCDEVPRIVATE to the
 * wl/dhd ioctl path: copy in the wl_ioctl_t header (with 32-bit compat
 * translation), check CAP_NET_ADMIN, copy in the payload, run
 * dhd_ioctl_process(), and copy the result back to user space.
 *
 * Holds the OS wake lock and the PERIM lock across the call; the PERIM lock
 * is dropped around copy_{from,to}_user since those may sleep/fault.
 * Returns 0 or a negative errno via OSL_ERROR().
 *
 * NOTE(review): lines are elided throughout this function (non-contiguous
 * numbering) — several returns, closing braces and 'goto done'-style cleanup
 * are not visible; verify against the full source.
 */
3995 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
3997 dhd_info_t *dhd = DHD_DEV_INFO(net);
4002 void *local_buf = NULL;
4005 DHD_OS_WAKE_LOCK(&dhd->pub);
4006 DHD_PERIM_LOCK(&dhd->pub);
4008 /* Interface up check for built-in type */
4009 if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
4010 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
4011 DHD_PERIM_UNLOCK(&dhd->pub);
4012 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4016 /* send to dongle only if we are not waiting for reload already */
4017 if (dhd->pub.hang_was_sent) {
4018 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
4019 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
4020 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4021 return OSL_ERROR(BCME_DONGLE_DOWN);
4024 ifidx = dhd_net2idx(dhd, net);
4025 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
4027 if (ifidx == DHD_BAD_IF) {
4028 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
4029 DHD_PERIM_UNLOCK(&dhd->pub);
4030 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4034 #if defined(WL_WIRELESS_EXT)
4035 /* linux wireless extensions */
4036 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
4037 /* may recurse, do NOT lock */
4038 ret = wl_iw_ioctl(net, ifr, cmd);
4039 DHD_PERIM_UNLOCK(&dhd->pub);
4040 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4043 #endif /* defined(WL_WIRELESS_EXT) */
4045 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
4046 if (cmd == SIOCETHTOOL) {
4047 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
4048 DHD_PERIM_UNLOCK(&dhd->pub);
4049 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4052 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
4054 if (cmd == SIOCDEVPRIVATE+1) {
4055 ret = wl_android_priv_cmd(net, ifr, cmd);
4056 dhd_check_hang(net, &dhd->pub, ret);
4057 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Anything other than SIOCDEVPRIVATE is not handled here. */
4061 if (cmd != SIOCDEVPRIVATE) {
4062 DHD_PERIM_UNLOCK(&dhd->pub);
4063 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4067 memset(&ioc, 0, sizeof(ioc));
4069 #ifdef CONFIG_COMPAT
/* 32-bit userspace on a 64-bit kernel: translate the compat layout,
 * widening the buf pointer via compat_ptr().
 */
4070 if (is_compat_task()) {
4071 compat_wl_ioctl_t compat_ioc;
4072 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
4073 bcmerror = BCME_BADADDR;
4076 ioc.cmd = compat_ioc.cmd;
4077 ioc.buf = compat_ptr(compat_ioc.buf);
4078 ioc.len = compat_ioc.len;
4079 ioc.set = compat_ioc.set;
4080 ioc.used = compat_ioc.used;
4081 ioc.needed = compat_ioc.needed;
4082 /* To differentiate between wl and dhd read 4 more bytes */
4083 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
4084 sizeof(uint)) != 0)) {
4085 bcmerror = BCME_BADADDR;
4089 #endif /* CONFIG_COMPAT */
4091 /* Copy the ioc control structure part of ioctl request */
4092 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
4093 bcmerror = BCME_BADADDR;
4097 /* To differentiate between wl and dhd read 4 more bytes */
4098 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
4099 sizeof(uint)) != 0)) {
4100 bcmerror = BCME_BADADDR;
/* Only privileged (CAP_NET_ADMIN) callers may issue wl/dhd ioctls. */
4105 if (!capable(CAP_NET_ADMIN)) {
4106 bcmerror = BCME_EPERM;
/* +1 so the payload can always be NUL-terminated below. */
4111 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
4112 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
4113 bcmerror = BCME_NOMEM;
4117 DHD_PERIM_UNLOCK(&dhd->pub);
4118 if (copy_from_user(local_buf, ioc.buf, buflen)) {
4119 DHD_PERIM_LOCK(&dhd->pub);
4120 bcmerror = BCME_BADADDR;
4123 DHD_PERIM_LOCK(&dhd->pub);
4125 *(char *)(local_buf + buflen) = '\0';
4128 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
4130 if (!bcmerror && buflen && local_buf && ioc.buf) {
4131 DHD_PERIM_UNLOCK(&dhd->pub);
4132 if (copy_to_user(ioc.buf, local_buf, buflen))
4134 DHD_PERIM_LOCK(&dhd->pub);
4139 MFREE(dhd->pub.osh, local_buf, buflen+1);
4141 DHD_PERIM_UNLOCK(&dhd->pub);
4142 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4144 return OSL_ERROR(bcmerror);
4147 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * dhd_deepsleep() - turn firmware deepsleep on (flag == 1) or off (flag == 0).
 *
 * On: disables packet filtering and MPC ("mpc" iovar), then sets the
 * "deepsleep" iovar. Off: retries up to MAX_TRY_CNT times, each iteration
 * writing "deepsleep" and then reading it back to verify it reached 0, then
 * re-enables MPC.
 *
 * NOTE(review): lines are elided here (non-contiguous numbering); the
 * switch header, the powervar assignments between the iovar calls, and the
 * return are not visible — the values written are presumably toggled via
 * 'powervar'; confirm against the full source.
 */
4148 int dhd_deepsleep(dhd_info_t *dhd, int flag)
4159 case 1 : /* Deepsleep on */
4160 DHD_ERROR(("dhd_deepsleep: ON\n"));
4161 /* give some time to sysioc_work before deepsleep */
4163 #ifdef PKT_FILTER_SUPPORT
4164 /* disable pkt filter */
4165 dhd_enable_packet_filter(0, dhdp);
4166 #endif /* PKT_FILTER_SUPPORT */
/* Disable MPC so it does not fight with deepsleep power management. */
4169 memset(iovbuf, 0, sizeof(iovbuf));
4170 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4171 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4173 /* Enable Deepsleep */
4175 memset(iovbuf, 0, sizeof(iovbuf));
4176 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4177 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4180 case 0: /* Deepsleep Off */
4181 DHD_ERROR(("dhd_deepsleep: OFF\n"));
4183 /* Disable Deepsleep */
4184 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
4186 memset(iovbuf, 0, sizeof(iovbuf));
4187 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4188 iovbuf, sizeof(iovbuf));
4189 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
4190 sizeof(iovbuf), TRUE, 0);
/* Read back to confirm the firmware actually left deepsleep. */
4192 memset(iovbuf, 0, sizeof(iovbuf));
4193 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4194 iovbuf, sizeof(iovbuf));
4195 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
4196 sizeof(iovbuf), FALSE, 0)) < 0) {
4197 DHD_ERROR(("the error of dhd deepsleep status"
4198 " ret value :%d\n", ret));
4200 if (!(*(int *)iovbuf)) {
4201 DHD_ERROR(("deepsleep mode is 0,"
4202 " count: %d\n", cnt));
/* Re-enable MPC once deepsleep is off. */
4210 memset(iovbuf, 0, sizeof(iovbuf));
4211 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4212 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
/*
 * dhd_stop() - netdev ndo_stop handler: bring the DHD interface down.
 *
 * Stops the TX queue, takes cfg80211 down, removes leftover virtual
 * interfaces (for non-built-in builds), cleans up wlfc, stops the protocol
 * layer, powers Wi-Fi off for ifidx 0 when firmware is not loaded at driver
 * load, optionally enters deepsleep, and resets timeout/hang bookkeeping.
 *
 * NOTE(review): lines are elided (non-contiguous numbering); the early
 * return when pub.up == 0, several #if guards and the final return are not
 * visible — confirm against the full source.
 */
4220 dhd_stop(struct net_device *net)
4223 dhd_info_t *dhd = DHD_DEV_INFO(net);
4224 DHD_OS_WAKE_LOCK(&dhd->pub);
4225 DHD_PERIM_LOCK(&dhd->pub);
4226 printf("%s: Enter %p\n", __FUNCTION__, net);
4227 if (dhd->pub.up == 0) {
4231 dhd_if_flush_sta(DHD_DEV_IFP(net));
4234 ifidx = dhd_net2idx(dhd, net);
4235 BCM_REFERENCE(ifidx);
4237 /* Set state and stop OS transmissions */
4238 netif_stop_queue(net);
4243 wl_cfg80211_down(NULL);
/*
4246 * For CFG80211: Clean up all the left over virtual interfaces
4247 * when the primary Interface is brought down. [ifconfig wlan0 down]
 */
4249 if (!dhd_download_fw_on_driverload) {
4250 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
4251 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
4254 dhd_net_if_lock_local(dhd);
/* index 0 is the primary interface being stopped; remove only virtuals */
4255 for (i = 1; i < DHD_MAX_IFS; i++)
4256 dhd_remove_if(&dhd->pub, i, FALSE);
4257 dhd_net_if_unlock_local(dhd);
4261 #endif /* WL_CFG80211 */
4263 #ifdef PROP_TXSTATUS
4264 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
4266 /* Stop the protocol module */
4267 dhd_prot_stop(&dhd->pub);
4269 OLD_MOD_DEC_USE_COUNT;
/* Built-from-userspace case: power the chip off with the primary IF. */
4271 if (ifidx == 0 && !dhd_download_fw_on_driverload)
4272 wl_android_wifi_off(net);
4274 if (dhd->pub.conf->deepsleep)
4275 dhd_deepsleep(dhd, 1);
4277 dhd->pub.rxcnt_timeout = 0;
4278 dhd->pub.txcnt_timeout = 0;
4280 dhd->pub.hang_was_sent = 0;
4282 /* Clear country spec for built-in type driver */
4283 if (!dhd_download_fw_on_driverload) {
4284 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
4285 dhd->pub.dhd_cspec.rev = 0;
4286 dhd->pub.dhd_cspec.ccode[0] = 0x00;
4289 printf("%s: Exit\n", __FUNCTION__);
4290 DHD_PERIM_UNLOCK(&dhd->pub);
4291 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4295 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
4296 extern bool g_first_broadcast_scan;
/*
 * dhd_interworking_enable() - enable 802.11u interworking in the firmware
 * and, on success, set the basic WNM (BSS transition + notification)
 * capabilities needed for Hotspot 2.0 Release 2.
 *
 * Returns the last dhd_wl_ioctl_cmd() status (BCME_*). NOTE(review): the
 * return statement and some braces are elided in this view.
 */
4300 static int dhd_interworking_enable(dhd_pub_t *dhd)
4302 char iovbuf[WLC_IOCTL_SMLEN];
4303 uint32 enable = true;
4306 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
4307 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4308 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
4311 if (ret == BCME_OK) {
4312 /* basic capabilities for HS20 REL2 */
4313 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
4314 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
4315 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
4316 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4317 DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
/*
 * dhd_open() - netdev ndo_open handler: bring the DHD interface up.
 *
 * Resets hang/trap state, optionally powers on Wi-Fi and starts the bus when
 * firmware was not downloaded at driver load, syncs the MAC address to the
 * netdev, refreshes the TOE (TCP offload) feature flag, brings up cfg80211,
 * and finally starts the TX queue.
 *
 * NOTE(review): this view has elided lines (non-contiguous numbering) — the
 * 'exit:' style label, several returns/braces and #if guards are missing;
 * confirm details against the full source.
 */
4326 dhd_open(struct net_device *net)
4328 dhd_info_t *dhd = DHD_DEV_INFO(net);
4335 printf("%s: Enter %p\n", __FUNCTION__, net);
4336 #if defined(MULTIPLE_SUPPLICANT)
4337 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
/* Serialize against SDIO probe when several supplicants can race open(). */
4338 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4339 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
4341 mutex_lock(&_dhd_sdio_mutex_lock_);
4343 #endif /* MULTIPLE_SUPPLICANT */
4345 DHD_OS_WAKE_LOCK(&dhd->pub);
4346 DHD_PERIM_LOCK(&dhd->pub);
4347 dhd->pub.dongle_trap_occured = 0;
4348 dhd->pub.hang_was_sent = 0;
/*
4352 * Force start if ifconfig_up gets called before START command
4353 * We keep WEXT's wl_control_wl_start to provide backward compatibility
4354 * This should be removed in the future
 */
4356 ret = wl_control_wl_start(net);
4358 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4364 ifidx = dhd_net2idx(dhd, net);
4365 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
4368 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
4373 if (!dhd->iflist[ifidx]) {
4374 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
4380 atomic_set(&dhd->pend_8021x_cnt, 0);
4381 if (!dhd_download_fw_on_driverload) {
4382 DHD_ERROR(("\n%s\n", dhd_version));
4383 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
4384 g_first_broadcast_scan = TRUE;
4386 ret = wl_android_wifi_on(net);
4388 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
4389 __FUNCTION__, ret));
4395 if (dhd->pub.busstate != DHD_BUS_DATA) {
4397 /* try to bring up bus */
/* PERIM lock dropped: dhd_bus_start() may block on firmware download. */
4398 DHD_PERIM_UNLOCK(&dhd->pub);
4399 ret = dhd_bus_start(&dhd->pub);
4400 DHD_PERIM_LOCK(&dhd->pub);
4402 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4408 if (dhd_download_fw_on_driverload) {
4409 if (dhd->pub.conf->deepsleep)
4410 dhd_deepsleep(dhd, 0);
4413 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
4414 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
4417 /* Get current TOE mode from dongle */
4418 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
4419 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
4421 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
4424 #if defined(WL_CFG80211)
4425 if (unlikely(wl_cfg80211_up(NULL))) {
4426 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
4430 dhd_set_scb_probe(&dhd->pub);
4431 #endif /* WL_CFG80211 */
4434 /* Allow transmit calls */
4435 netif_start_queue(net);
4439 dhd_dbg_init(&dhd->pub);
4442 OLD_MOD_INC_USE_COUNT;
4447 DHD_PERIM_UNLOCK(&dhd->pub);
4448 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4450 #if defined(MULTIPLE_SUPPLICANT)
4451 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
4452 mutex_unlock(&_dhd_sdio_mutex_lock_);
4454 #endif /* MULTIPLE_SUPPLICANT */
4456 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
/*
 * dhd_do_driver_init() - idempotent helper to initialize the driver via
 * dhd_open() on the primary interface; does nothing if the bus is already
 * in the DATA state.
 *
 * NOTE(review): returns and braces are elided in this view (non-contiguous
 * numbering) — presumably returns 0 on success / negative on failure;
 * confirm against the full source.
 */
4460 int dhd_do_driver_init(struct net_device *net)
4462 dhd_info_t *dhd = NULL;
4465 DHD_ERROR(("Primary Interface not initialized \n"));
4469 #ifdef MULTIPLE_SUPPLICANT
4470 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
/* Do not re-enter while the SDIO probe path is still holding the mutex. */
4471 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4472 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
4475 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
4476 #endif /* MULTIPLE_SUPPLICANT */
4478 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
4479 dhd = DHD_DEV_INFO(net);
/*
4481 /* If driver is already initialized, do nothing
 */
4483 if (dhd->pub.busstate == DHD_BUS_DATA) {
4484 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
4488 if (dhd_open(net) < 0) {
4489 DHD_ERROR(("Driver Init Failed \n"));
/*
 * dhd_event_ifadd() - handle a firmware IF_ADD event. If cfg80211 claims the
 * event (wl_cfg80211_notify_ifadd returns BCME_OK) nothing more is done;
 * otherwise the event is copied into a dhd_if_event_t and the actual
 * interface creation is deferred to a workqueue so the DPC thread is not
 * blocked.
 *
 * NOTE(review): the MALLOC failure path and return are elided in this view.
 */
4497 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4501 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4505 /* handle IF event caused by wl commands, SoftAP, WEXT and
4506 * anything else. This has to be done asynchronously otherwise
4507 * DPC will be blocked (and iovars will timeout as DPC has no chance
4508 * to read the response back)
 */
4510 if (ifevent->ifidx > 0) {
4511 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4513 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4514 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4515 strncpy(if_event->name, name, IFNAMSIZ);
4516 if_event->name[IFNAMSIZ - 1] = '\0';
4517 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
4518 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
/*
 * dhd_event_ifdel() - handle a firmware IF_DEL event. Mirrors
 * dhd_event_ifadd(): cfg80211 gets first refusal; otherwise the deletion is
 * packaged into a dhd_if_event_t and deferred to the workqueue so DPC is
 * never blocked.
 *
 * NOTE(review): the MALLOC failure path and return are elided in this view.
 */
4525 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4527 dhd_if_event_t *if_event;
4529 #if defined(WL_CFG80211) && !defined(P2PONEINT)
4530 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4532 #endif /* WL_CFG80211 */
/*
4534 /* handle IF event caused by wl commands, SoftAP, WEXT and
 * anything else — asynchronously, same rationale as dhd_event_ifadd().
 */
4537 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4538 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4539 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4540 strncpy(if_event->name, name, IFNAMSIZ);
4541 if_event->name[IFNAMSIZ - 1] = '\0';
4542 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
4543 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
4548 /* unregister and free the existing net_device interface (if any) in iflist and
4549 * allocate a new one. the slot is reused. this function does NOT register the
4550 * new interface to linux kernel. dhd_register_if does the job
/*
 * dhd_allocate_if() - (re)allocate the dhd_if_t slot at @ifidx: free any
 * existing net_device in the slot, allocate a fresh dhd_if_t plus etherdev,
 * wire up the private data, name and destructor, and store it in iflist.
 * Registration with the kernel is done later by dhd_register_if().
 *
 * @need_rtnl_lock: selects unregister_netdev() vs unregister_netdevice()
 *                  (caller already holds rtnl in the latter case).
 *
 * NOTE(review): elided lines hide the return statements and the error label;
 * the tail (lines 4633+) is the failure cleanup path.
 */
4553 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
4554 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
4556 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4559 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
4560 ifp = dhdinfo->iflist[ifidx];
4563 if (ifp->net != NULL) {
4564 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
4566 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
4568 /* in unregister_netdev case, the interface gets freed by net->destructor
4569 * (which is set to free_netdev)
 */
4571 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4572 free_netdev(ifp->net);
4574 netif_stop_queue(ifp->net);
4576 unregister_netdev(ifp->net);
4578 unregister_netdevice(ifp->net);
4583 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
4585 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
4590 memset(ifp, 0, sizeof(dhd_if_t));
4591 ifp->info = dhdinfo;
4593 ifp->bssidx = bssidx;
4595 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
4597 /* Allocate etherdev, including space for private structure */
4598 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
4599 if (ifp->net == NULL) {
4600 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
4604 /* Setup the dhd interface's netdevice private structure. */
4605 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
4607 if (name && name[0]) {
4608 strncpy(ifp->net->name, name, IFNAMSIZ);
4609 ifp->net->name[IFNAMSIZ - 1] = '\0';
/* destructor choice depends on build config (elided #if guards). */
4613 ifp->net->destructor = free_netdev;
4615 ifp->net->destructor = dhd_netdev_free;
4617 ifp->net->destructor = free_netdev;
4618 #endif /* WL_CFG80211 */
4619 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
4620 ifp->name[IFNAMSIZ - 1] = '\0';
4621 dhdinfo->iflist[ifidx] = ifp;
4623 #ifdef PCIE_FULL_DONGLE
4624 /* Initialize STA info list */
4625 INIT_LIST_HEAD(&ifp->sta_list);
4626 DHD_IF_STA_LIST_LOCK_INIT(ifp);
4627 #endif /* PCIE_FULL_DONGLE */
/* failure cleanup: release any partially-built netdev and the slot memory */
4633 if (ifp->net != NULL) {
4634 dhd_dev_priv_clear(ifp->net);
4635 free_netdev(ifp->net);
4638 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4641 dhdinfo->iflist[ifidx] = NULL;
4645 /* unregister and free the net_device interface associated with the indexed
4646 * slot, also free the slot memory and set the slot pointer to NULL
/*
 * dhd_remove_if() - tear down the interface slot at @ifidx: unregister/free
 * its net_device (choosing unregister_netdev vs unregister_netdevice based on
 * @need_rtnl_lock), clean up WMF and the STA list, then free the dhd_if_t and
 * clear the iflist slot.
 *
 * NOTE(review): elided lines hide some braces, #if guards and the return.
 */
4649 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
4651 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4654 ifp = dhdinfo->iflist[ifidx];
4656 if (ifp->net != NULL) {
4657 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
4659 /* in unregister_netdev case, the interface gets freed by net->destructor
4660 * (which is set to free_netdev)
 */
4662 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4663 free_netdev(ifp->net);
4665 netif_stop_queue(ifp->net);
4670 custom_rps_map_clear(ifp->net->_rx);
4671 #endif /* SET_RPS_CPUS */
4673 unregister_netdev(ifp->net);
4675 unregister_netdevice(ifp->net);
4680 dhd_wmf_cleanup(dhdpub, ifidx);
4681 #endif /* DHD_WMF */
4683 dhd_if_del_sta_list(ifp);
4685 dhdinfo->iflist[ifidx] = NULL;
4686 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4693 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (wlan0): full open/stop/ioctl/
 * xmit handling. ndo_set_rx_mode vs ndo_set_multicast_list is selected by
 * kernel version (the callback was renamed in 3.2).
 */
4694 static struct net_device_ops dhd_ops_pri = {
4695 .ndo_open = dhd_open,
4696 .ndo_stop = dhd_stop,
4697 .ndo_get_stats = dhd_get_stats,
4698 .ndo_do_ioctl = dhd_ioctl_entry,
4699 .ndo_start_xmit = dhd_start_xmit,
4700 .ndo_set_mac_address = dhd_set_mac_address,
4701 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4702 .ndo_set_rx_mode = dhd_set_multicast_list,
4704 .ndo_set_multicast_list = dhd_set_multicast_list,
/* net_device_ops for virtual interfaces: same handlers as the primary but
 * without ndo_open/ndo_stop (virtual IF lifetime is driven by IF events).
 */
4708 static struct net_device_ops dhd_ops_virt = {
4709 .ndo_get_stats = dhd_get_stats,
4710 .ndo_do_ioctl = dhd_ioctl_entry,
4711 .ndo_start_xmit = dhd_start_xmit,
4712 .ndo_set_mac_address = dhd_set_mac_address,
4713 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4714 .ndo_set_rx_mode = dhd_set_multicast_list,
4716 .ndo_set_multicast_list = dhd_set_multicast_list,
4721 extern int wl_cfgp2p_if_open(struct net_device *net);
4722 extern int wl_cfgp2p_if_stop(struct net_device *net);
/* net_device_ops for the cfgp2p virtual interface (P2PONEINT builds):
 * open/stop are routed through the cfgp2p layer, everything else is the
 * standard DHD handler set.
 */
4724 static struct net_device_ops dhd_cfgp2p_ops_virt = {
4725 .ndo_open = wl_cfgp2p_if_open,
4726 .ndo_stop = wl_cfgp2p_if_stop,
4727 .ndo_get_stats = dhd_get_stats,
4728 .ndo_do_ioctl = dhd_ioctl_entry,
4729 .ndo_start_xmit = dhd_start_xmit,
4730 .ndo_set_mac_address = dhd_set_mac_address,
4731 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4732 .ndo_set_rx_mode = dhd_set_multicast_list,
4734 .ndo_set_multicast_list = dhd_set_multicast_list,
4737 #endif /* P2PONEINT */
4738 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4741 extern void debugger_init(void *bus_handle);
4745 #ifdef SHOW_LOGTRACE
4746 static char *logstrs_path = "/root/logstrs.bin";
4747 module_param(logstrs_path, charp, S_IRUGO);
/*
 * dhd_init_logstrs_array() - load logstrs.bin (format strings used to decode
 * firmware event-log traces) from the filesystem and build the fmts[] table
 * mapping log numbers to format strings.
 *
 * Three file layouts are supported:
 *  1. modern, with a trailing logstr_header_t (magic LOGSTRS_MAGIC),
 *  2. legacy ROM+RAM layout (4324b5 only — located via fixed constants),
 *  3. legacy RAM-only layout.
 *
 * Results are stored into @temp (raw_fmts buffer, num_fmts). NOTE(review):
 * error-cleanup paths (kfree of raw_fmts/fmts) and returns are elided in
 * this view; confirm resource handling against the full source.
 */
4750 dhd_init_logstrs_array(dhd_event_log_t *temp)
4752 struct file *filep = NULL;
4755 char *raw_fmts = NULL;
4756 int logstrs_size = 0;
4758 logstr_header_t *hdr = NULL;
4759 uint32 *lognums = NULL;
4760 char *logstrs = NULL;
4768 filep = filp_open(logstrs_path, O_RDONLY, 0);
4769 if (IS_ERR(filep)) {
4770 DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
4773 error = vfs_stat(logstrs_path, &stat);
4775 DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
4778 logstrs_size = (int) stat.size;
4780 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
4781 if (raw_fmts == NULL) {
4782 DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
4785 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
4786 DHD_ERROR(("Error: Log strings file read failed\n"));
4790 /* Remember header from the logstrs.bin file */
4791 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
4792 sizeof(logstr_header_t));
4794 if (hdr->log_magic == LOGSTRS_MAGIC) {
/*
4796 * logstrs.bin start with header.
 */
4798 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
4799 ram_index = (hdr->ram_lognums_offset -
4800 hdr->rom_lognums_offset) / sizeof(uint32);
4801 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
4802 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
/*
4805 * Legacy logstrs.bin format without header.
 */
4807 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
4808 if (num_fmts == 0) {
4809 /* Legacy ROM/RAM logstrs.bin format:
4810 * - ROM 'lognums' section
4811 * - RAM 'lognums' section
4812 * - ROM 'logstrs' section.
4813 * - RAM 'logstrs' section.
 *
4815 * 'lognums' is an array of indexes for the strings in the
4816 * 'logstrs' section. The first uint32 is 0 (index of first
4817 * string in ROM 'logstrs' section).
 *
4819 * The 4324b5 is the only ROM that uses this legacy format. Use the
4820 * fixed number of ROM fmtnums to find the start of the RAM
4821 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
4822 * find the ROM 'logstrs' section.
 */
4824 #define NUM_4324B5_ROM_FMTS 186
4825 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
4826 ram_index = NUM_4324B5_ROM_FMTS;
4827 lognums = (uint32 *) raw_fmts;
4828 num_fmts = ram_index;
4829 logstrs = (char *) &raw_fmts[num_fmts << 2];
/* scan forward a word at a time until the known first ROM string is found */
4830 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
4832 logstrs = (char *) &raw_fmts[num_fmts << 2];
4835 /* Legacy RAM-only logstrs.bin format:
4836 * - RAM 'lognums' section
4837 * - RAM 'logstrs' section.
 *
4839 * 'lognums' is an array of indexes for the strings in the
4840 * 'logstrs' section. The first uint32 is an index to the
4841 * start of 'logstrs'. Therefore, if this index is divided
4842 * by 'sizeof(uint32)' it provides the number of logstr
 * entries.
 */
4846 lognums = (uint32 *) raw_fmts;
4847 logstrs = (char *) &raw_fmts[num_fmts << 2];
4850 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
4852 DHD_ERROR(("Failed to allocate fmts memory\n"));
4856 for (i = 0; i < num_fmts; i++) {
4857 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
4858 * (they are 0-indexed relative to 'rom_logstrs_offset').
 *
4860 * RAM lognums are already indexed to point to the correct RAM logstrs (they
4861 * are 0-indexed relative to the start of the logstrs.bin file).
 */
4863 if (i == ram_index) {
4866 fmts[i] = &logstrs[lognums[i]];
4869 temp->raw_fmts = raw_fmts;
4870 temp->num_fmts = num_fmts;
4871 filp_close(filep, NULL);
4880 filp_close(filep, NULL);
4885 #endif /* SHOW_LOGTRACE */
/*
 * dhd_attach() - allocate and initialize the driver instance for one adapter.
 *
 * Major steps visible in this view: allocate dhd_info (preallocated or
 * MALLOC'd), link it to the bus, attach the config module, resolve fw/nvram
 * paths, allocate the primary interface, initialize semaphores/spinlocks/
 * wakelocks, attach the protocol and cfg80211 (or WEXT) layers, create the
 * watchdog/DPC/RXF threads, and register PM/inetaddr notifiers.
 *
 * On any failure it falls through to the cleanup tail, which calls
 * dhd_detach()/dhd_free() based on how far dhd_state progressed.
 *
 * NOTE(review): many lines are elided (non-contiguous numbering) — 'goto
 * fail' statements, #else branches and the return are not visible; treat
 * control flow described above as reconstructed and confirm against the
 * full source.
 */
4889 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
4891 dhd_info_t *dhd = NULL;
4892 struct net_device *net = NULL;
4893 char if_name[IFNAMSIZ] = {'\0'};
4894 uint32 bus_type = -1;
4895 uint32 bus_num = -1;
4896 uint32 slot_num = -1;
4897 wifi_adapter_info_t *adapter = NULL;
4899 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
4900 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4902 /* will implement get_ids for DBUS later */
4903 #if defined(BCMSDIO)
4904 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
4906 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
4908 /* Allocate primary dhd_info */
4909 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
4911 dhd = MALLOC(osh, sizeof(dhd_info_t));
4913 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
4917 memset(dhd, 0, sizeof(dhd_info_t));
4918 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
4920 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
4923 dhd->adapter = adapter;
4925 #ifdef GET_CUSTOM_MAC_ENABLE
4926 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
4927 #endif /* GET_CUSTOM_MAC_ENABLE */
4928 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
4929 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
4931 /* Initialize thread based operation and lock */
4932 sema_init(&dhd->sdsem, 1);
4934 /* Link to info module */
4935 dhd->pub.info = dhd;
4938 /* Link to bus module */
4940 dhd->pub.hdrlen = bus_hdrlen;
4942 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
4943 * because dhd_detech will check .info is NULL or not.
 */
4945 if (dhd_conf_attach(&dhd->pub) != 0) {
4946 DHD_ERROR(("dhd_conf_attach failed\n"));
4949 dhd_conf_reset(&dhd->pub);
4950 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
4951 dhd_conf_preinit(&dhd->pub);
4953 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
4954 * This is indeed a hack but we have to make it work properly before we have a better
 * solution.
 */
4957 dhd_update_fw_nv_path(dhd);
4958 #ifndef BUILD_IN_KERNEL
4959 dhd_conf_read_config(&dhd->pub, dhd->conf_path);
4962 /* Set network interface name if it was provided as module parameter */
4963 if (iface_name[0]) {
4966 strncpy(if_name, iface_name, IFNAMSIZ);
4967 if_name[IFNAMSIZ - 1] = 0;
4968 len = strlen(if_name);
4969 ch = if_name[len - 1];
/* append "%d" so the kernel numbers the interface, unless a digit was given */
4970 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
4971 strcat(if_name, "%d");
4973 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
4976 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
4978 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4981 net->netdev_ops = NULL;
4984 sema_init(&dhd->proto_sem, 1);
4986 #ifdef PROP_TXSTATUS
4987 spin_lock_init(&dhd->wlfc_spinlock);
4989 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
4990 dhd->pub.plat_init = dhd_wlfc_plat_init;
4991 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
4992 #endif /* PROP_TXSTATUS */
4994 /* Initialize other structure content */
4995 init_waitqueue_head(&dhd->ioctl_resp_wait);
4996 init_waitqueue_head(&dhd->ctrl_wait);
4998 /* Initialize the spinlocks */
4999 spin_lock_init(&dhd->sdlock);
5000 spin_lock_init(&dhd->txqlock);
5001 spin_lock_init(&dhd->dhd_lock);
5002 spin_lock_init(&dhd->rxf_lock);
5003 #if defined(RXFRAME_THREAD)
5004 dhd->rxthread_enabled = TRUE;
5005 #endif /* defined(RXFRAME_THREAD) */
5007 #ifdef DHDTCPACK_SUPPRESS
5008 spin_lock_init(&dhd->tcpack_lock);
5009 #endif /* DHDTCPACK_SUPPRESS */
5011 /* Initialize Wakelock stuff */
5012 spin_lock_init(&dhd->wakelock_spinlock);
5013 dhd->wakelock_counter = 0;
5014 dhd->wakelock_wd_counter = 0;
5015 dhd->wakelock_rx_timeout_enable = 0;
5016 dhd->wakelock_ctrl_timeout_enable = 0;
5017 #ifdef CONFIG_HAS_WAKELOCK
5018 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
5019 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
5020 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
5021 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
5022 #ifdef BCMPCIE_OOB_HOST_WAKE
5023 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
5024 #endif /* BCMPCIE_OOB_HOST_WAKE */
5025 #endif /* CONFIG_HAS_WAKELOCK */
5026 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
5027 mutex_init(&dhd->dhd_net_if_mutex);
5028 mutex_init(&dhd->dhd_suspend_mutex);
5030 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
5032 /* Attach and link in the protocol */
5033 if (dhd_prot_attach(&dhd->pub) != 0) {
5034 DHD_ERROR(("dhd_prot_attach failed\n"));
5037 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
5040 /* Attach and link in the cfg80211 */
5041 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
5042 DHD_ERROR(("wl_cfg80211_attach failed\n"));
5046 dhd_monitor_init(&dhd->pub);
5047 dhd_state |= DHD_ATTACH_STATE_CFG80211;
5049 #if defined(WL_WIRELESS_EXT)
5050 /* Attach and link in the iw */
5051 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
5052 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
5053 DHD_ERROR(("wl_iw_attach failed\n"));
5056 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
5058 #endif /* defined(WL_WIRELESS_EXT) */
5060 #ifdef SHOW_LOGTRACE
5061 dhd_init_logstrs_array(&dhd->event_data);
5062 #endif /* SHOW_LOGTRACE */
5064 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
5065 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
5070 /* Set up the watchdog timer */
5071 init_timer(&dhd->timer);
5072 dhd->timer.data = (ulong)dhd;
5073 dhd->timer.function = dhd_watchdog;
5074 dhd->default_wd_interval = dhd_watchdog_ms;
5076 if (dhd_watchdog_prio >= 0) {
5077 /* Initialize watchdog thread */
5078 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
5081 dhd->thr_wdt_ctl.thr_pid = -1;
5085 debugger_init((void *) bus);
5088 /* Set up the bottom half handler */
5089 if (dhd_dpc_prio >= 0) {
5090 /* Initialize DPC thread */
5091 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
5093 /* use tasklet for dpc */
5094 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
5095 dhd->thr_dpc_ctl.thr_pid = -1;
5098 if (dhd->rxthread_enabled) {
5099 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
5100 /* Initialize RXF thread */
5101 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
5104 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
5106 #if defined(CONFIG_PM_SLEEP)
5107 if (!dhd_pm_notifier_registered) {
5108 dhd_pm_notifier_registered = TRUE;
5109 register_pm_notifier(&dhd_pm_notifier);
5111 #endif /* CONFIG_PM_SLEEP */
5113 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5114 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
5115 dhd->early_suspend.suspend = dhd_early_suspend;
5116 dhd->early_suspend.resume = dhd_late_resume;
5117 register_early_suspend(&dhd->early_suspend);
5118 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
5119 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
5121 #ifdef ARP_OFFLOAD_SUPPORT
5122 dhd->pend_ipaddr = 0;
5123 if (!dhd_inetaddr_notifier_registered) {
5124 dhd_inetaddr_notifier_registered = TRUE;
5125 register_inetaddr_notifier(&dhd_inetaddr_notifier);
5127 #endif /* ARP_OFFLOAD_SUPPORT */
5129 if (!dhd_inet6addr_notifier_registered) {
5130 dhd_inet6addr_notifier_registered = TRUE;
5131 register_inet6addr_notifier(&dhd_inet6addr_notifier);
5134 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
5135 #ifdef DEBUG_CPU_FREQ
5136 dhd->new_freq = alloc_percpu(int);
5137 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
5138 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
5140 #ifdef DHDTCPACK_SUPPRESS
/* default TCP-ACK suppression mode depends on bus type (SDIO/PCIe/other) */
5142 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
5143 #elif defined(BCMPCIE)
5144 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
5146 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5147 #endif /* BCMSDIO */
5148 #endif /* DHDTCPACK_SUPPRESS */
5150 dhd_state |= DHD_ATTACH_STATE_DONE;
5151 dhd->dhd_state = dhd_state;
5154 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
5156 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/* failure tail: unwind whatever was attached based on dhd_state */
5160 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
5161 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
5162 __FUNCTION__, dhd_state, &dhd->pub));
5163 dhd->dhd_state = dhd_state;
5164 dhd_detach(&dhd->pub);
5165 dhd_free(&dhd->pub);
/* Derive the DHD operating mode from the firmware image file name:
 * a suffix embedded in fw_path ("_apsta", "_p2p", "_ibss", "_mfg")
 * selects the matching DHD_FLAG_*_MODE; anything else is plain STA.
 * Note: checks are ordered, so the first matching substring wins.
 */
5171 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
5173 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
5174 return DHD_FLAG_HOSTAP_MODE;
5175 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
5176 return DHD_FLAG_P2P_MODE;
5177 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
5178 return DHD_FLAG_IBSS_MODE;
5179 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
5180 return DHD_FLAG_MFG_MODE;
/* No recognized suffix: default to station-mode firmware. */
5182 return DHD_FLAG_STA_MODE;
5185 extern int rkwifi_set_firmware(char *fw, char *nvram);
/* Refresh dhdinfo->fw_path / nv_path / conf_path from the available
 * sources, in increasing priority:
 *   1. build-time defaults (rkwifi_set_firmware, CONFIG_BCMDHD_*_PATH),
 *   2. platform adapter info (used only when the stored path is empty),
 *   3. the firmware_path / nvram_path / config_path module parameters,
 *      which are consumed (cleared) once copied.
 * NOTE(review): the function's tail (return value) is not visible here;
 * presumably it returns TRUE when usable paths are in place — confirm
 * against the full source.
 */
5187 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
5192 const char *fw = NULL;
5193 const char *nv = NULL;
5194 const char *conf = NULL;
5195 char firmware[100] = {0};
5196 char nvram[100] = {0};
5197 wifi_adapter_info_t *adapter = dhdinfo->adapter;
5200 /* Update firmware and nvram path. The path may be from adapter info or module parameter
5201 * The path from adapter info is used for initialization only (as it won't change).
5203 * The firmware_path/nvram_path module parameter may be changed by the system at run
5204 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
5205 * command may change dhdinfo->fw_path. As such we need to clear the path info in
5206 * module parameter after it is copied. We won't update the path until the module parameter
5207 * is changed again (first character is not '\0')
5210 /* set default firmware and nvram path for built-in type driver */
5211 // if (!dhd_download_fw_on_driverload) {
5212 rkwifi_set_firmware(firmware, nvram);
5213 #ifdef CONFIG_BCMDHD_FW_PATH
5214 fw = CONFIG_BCMDHD_FW_PATH;
5217 #endif /* CONFIG_BCMDHD_FW_PATH */
5218 #ifdef CONFIG_BCMDHD_NVRAM_PATH
5219 nv = CONFIG_BCMDHD_NVRAM_PATH;
5222 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
5225 /* check if we need to initialize the path */
5226 if (dhdinfo->fw_path[0] == '\0') {
5227 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
5228 fw = adapter->fw_path;
5231 if (dhdinfo->nv_path[0] == '\0') {
5232 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
5233 nv = adapter->nv_path;
5235 if (dhdinfo->conf_path[0] == '\0') {
5236 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
5237 conf = adapter->conf_path;
5240 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
5242 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
5244 if (firmware_path[0] != '\0')
5246 if (nvram_path[0] != '\0')
5248 if (config_path[0] != '\0')
/* Each copy below is length-checked first, so strncpy cannot overflow;
 * because len < sizeof(dest) the copy is also NUL-terminated.  The
 * trailing-'\n' strip handles paths read from sysfs/procfs writes. */
5251 if (fw && fw[0] != '\0') {
5252 fw_len = strlen(fw);
5253 if (fw_len >= sizeof(dhdinfo->fw_path)) {
5254 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
5257 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
5258 if (dhdinfo->fw_path[fw_len-1] == '\n')
5259 dhdinfo->fw_path[fw_len-1] = '\0';
5261 if (nv && nv[0] != '\0') {
5262 nv_len = strlen(nv);
5263 if (nv_len >= sizeof(dhdinfo->nv_path)) {
5264 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
5267 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
5268 if (dhdinfo->nv_path[nv_len-1] == '\n')
5269 dhdinfo->nv_path[nv_len-1] = '\0';
5271 if (conf && conf[0] != '\0') {
5272 conf_len = strlen(conf);
5273 if (conf_len >= sizeof(dhdinfo->conf_path)) {
5274 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
5277 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
5278 if (dhdinfo->conf_path[conf_len-1] == '\n')
5279 dhdinfo->conf_path[conf_len-1] = '\0';
5283 /* clear the path in module parameter */
5284 firmware_path[0] = '\0';
5285 nvram_path[0] = '\0';
5286 config_path[0] = '\0';
5289 #ifndef BCMEMBEDIMAGE
5290 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
5291 if (dhdinfo->fw_path[0] == '\0') {
5292 DHD_ERROR(("firmware path not found\n"));
5295 if (dhdinfo->nv_path[0] == '\0') {
5296 DHD_ERROR(("nvram path not found\n"));
/* No config file given anywhere: synthesize conf_path from nv_path. */
5299 if (dhdinfo->conf_path[0] == '\0') {
5300 dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
5302 #endif /* BCMEMBEDIMAGE */
/* Bring the dongle bus up: download firmware/nvram/config if the bus is
 * still DOWN, start the watchdog timer, initialize the bus and (per
 * build) OOB interrupts and PCIe flow rings, run protocol init, and
 * finally sync state with the dongle.  Every early-exit path visible
 * below releases the perimeter lock (and sdlock where held) first.
 */
5309 dhd_bus_start(dhd_pub_t *dhdp)
5312 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
5313 unsigned long flags;
5317 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
5319 DHD_PERIM_LOCK(dhdp);
5321 /* try to download image and nvram to the dongle */
5322 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
5323 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
5324 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
5325 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
5326 dhd->fw_path, dhd->nv_path, dhd->conf_path);
5328 DHD_ERROR(("%s: failed to download firmware %s\n",
5329 __FUNCTION__, dhd->fw_path));
5330 DHD_PERIM_UNLOCK(dhdp);
/* Firmware must have been loaded (bus in LOAD state) to continue. */
5334 if (dhd->pub.busstate != DHD_BUS_LOAD) {
5335 DHD_PERIM_UNLOCK(dhdp);
5339 dhd_os_sdlock(dhdp);
5341 /* Start the watchdog timer */
5342 dhd->pub.tickcnt = 0;
5343 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
5345 /* Bring up the bus */
5346 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
5348 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
5349 dhd_os_sdunlock(dhdp);
5350 DHD_PERIM_UNLOCK(dhdp);
5353 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
5354 #if defined(BCMPCIE_OOB_HOST_WAKE)
5355 dhd_os_sdunlock(dhdp);
5356 #endif /* BCMPCIE_OOB_HOST_WAKE */
5357 /* Host registration for OOB interrupt */
5358 if (dhd_bus_oob_intr_register(dhdp)) {
5359 /* deactivate timer and wait for the handler to finish */
5360 #if !defined(BCMPCIE_OOB_HOST_WAKE)
5361 DHD_GENERAL_LOCK(&dhd->pub, flags);
5362 dhd->wd_timer_valid = FALSE;
5363 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
/* del_timer_sync: safe here because the general lock is not held. */
5364 del_timer_sync(&dhd->timer);
5366 dhd_os_sdunlock(dhdp);
5367 #endif /* BCMPCIE_OOB_HOST_WAKE */
5368 DHD_PERIM_UNLOCK(dhdp);
5369 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5370 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
5374 #if defined(BCMPCIE_OOB_HOST_WAKE)
5375 dhd_os_sdlock(dhdp);
5376 dhd_bus_oob_intr_set(dhdp, TRUE);
5378 /* Enable oob at firmware */
5379 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
5380 #endif /* BCMPCIE_OOB_HOST_WAKE */
5382 #ifdef PCIE_FULL_DONGLE
5385 uint32 num_flowrings; /* includes H2D common rings */
5386 num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
5387 DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
5389 if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
5390 dhd_os_sdunlock(dhdp);
5391 DHD_PERIM_UNLOCK(dhdp);
5395 #endif /* PCIE_FULL_DONGLE */
5397 /* Do protocol initialization necessary for IOCTL/IOVAR */
5398 dhd_prot_init(&dhd->pub);
5400 /* If bus is not ready, can't come up */
5401 if (dhd->pub.busstate != DHD_BUS_DATA) {
5402 DHD_GENERAL_LOCK(&dhd->pub, flags);
5403 dhd->wd_timer_valid = FALSE;
5404 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5405 del_timer_sync(&dhd->timer);
5406 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
5407 dhd_os_sdunlock(dhdp);
5408 DHD_PERIM_UNLOCK(dhdp);
5409 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5413 dhd_os_sdunlock(dhdp);
5415 /* Bus is ready, query any dongle information */
5416 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
5417 DHD_PERIM_UNLOCK(dhdp);
5421 #ifdef ARP_OFFLOAD_SUPPORT
/* Replay an IPv4 address that was assigned while the bus was down so
 * the dongle's ARP-offload host table is up to date. */
5422 if (dhd->pend_ipaddr) {
5423 #ifdef AOE_IP_ALIAS_SUPPORT
5424 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
5425 #endif /* AOE_IP_ALIAS_SUPPORT */
5426 dhd->pend_ipaddr = 0;
5428 #endif /* ARP_OFFLOAD_SUPPORT */
5430 DHD_PERIM_UNLOCK(dhdp);
/* Enable/disable TDLS in the firmware via the "tdls_enable" iovar and,
 * when enabling, push the TDLS auto-operation tunables (auto_op, idle
 * time, high/low RSSI thresholds).  No-op if the firmware does not
 * advertise the "tdls" capability or the state already matches tdls_on.
 */
5435 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
5437 char iovbuf[WLC_IOCTL_SMLEN];
5438 uint32 tdls = tdls_on;
5440 uint32 tdls_auto_op = 0;
5441 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
5442 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
5443 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
5445 if (!FW_SUPPORTED(dhd, tdls))
/* Already in the requested state: nothing to do. */
5448 if (dhd->tdls_enable == tdls_on)
5450 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf))
5451 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5452 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
/* Cache the new state so repeated calls short-circuit above. */
5455 dhd->tdls_enable = tdls_on;
5458 tdls_auto_op = auto_on;
5459 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
5460 iovbuf, sizeof(iovbuf));
5461 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5462 sizeof(iovbuf), TRUE, 0)) < 0) {
5463 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
5468 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
5469 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
5470 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5471 sizeof(iovbuf), TRUE, 0)) < 0) {
5472 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
5475 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
5476 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5477 sizeof(iovbuf), TRUE, 0)) < 0) {
5478 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
5481 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
5482 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5483 sizeof(iovbuf), TRUE, 0)) < 0) {
5484 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
/* net_device-facing wrapper: resolve the per-device dhd_info and
 * delegate to _dhd_tdls_enable() on its public dhd_pub_t. */
5493 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
5495 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5498 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
#ifdef PCIE_FULL_DONGLE
/* Maintain the singly-linked TDLS peer table (dhdp->peer_tbl) for the
 * PCIe full-dongle flow-ring path.
 *   connect   : prepend a new node for 'da' (duplicate MACs rejected).
 *   disconnect: delete the peer's flow rings, unlink and free its node.
 * 'da' is the peer's MAC address (ETHER_ADDR_LEN bytes).
 */
5504 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
5506 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5507 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5508 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
5509 tdls_peer_node_t *new = NULL, *prev = NULL;
5511 uint8 sa[ETHER_ADDR_LEN];
5512 int ifidx = dhd_net2idx(dhd, dev);
5514 if (ifidx == DHD_BAD_IF)
5517 dhdif = dhd->iflist[ifidx];
5518 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
/* Connect path: refuse duplicates, then push a new node at the head. */
5521 while (cur != NULL) {
5522 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5523 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5524 __FUNCTION__, __LINE__));
5530 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
5532 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
5535 memcpy(new->addr, da, ETHER_ADDR_LEN);
5536 new->next = dhdp->peer_tbl.node;
5537 dhdp->peer_tbl.node = new;
5538 dhdp->peer_tbl.tdls_peer_count++;
/* Disconnect path: find the node, tear down its flow rings, unlink
 * (head vs. middle handled via 'prev') and free it. */
5541 while (cur != NULL) {
5542 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5543 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
5545 prev->next = cur->next;
5547 dhdp->peer_tbl.node = cur->next;
5548 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
5549 dhdp->peer_tbl.tdls_peer_count--;
5555 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
#endif /* PCIE_FULL_DONGLE */
/* Report whether the driver is operating in a concurrent (multi- or
 * single-channel) mode, judged from the op_mode flag bits.
 * NOTE(review): surrounding return statements are elided in this
 * listing; the visible conditions only test the flags. */
5561 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
5566 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
5568 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
5569 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
#if !defined(AP) && defined(WLP2P)
5575 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
5576 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
5577 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
5578 * would still be named as fw_bcmdhd_apsta.
/* Probe the firmware for concurrency support: returns 0 when HOSTAP/MFG
 * mode is already forced or P2P is absent; otherwise returns
 * DHD_FLAG_CONCURR_SINGLE_CHAN_MODE, OR-ed with
 * DHD_FLAG_CONCURR_MULTI_CHAN_MODE when VSDB is supported. */
5581 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
5584 char buf[WLC_IOCTL_SMLEN];
5585 bool mchan_supported = FALSE;
5586 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5587 * test mode, that means we only will use the mode as it is
5589 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
5591 if (FW_SUPPORTED(dhd, vsdb)) {
5592 mchan_supported = TRUE;
5594 if (!FW_SUPPORTED(dhd, p2p)) {
5595 DHD_TRACE(("Chip does not support p2p\n"));
5599 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5600 memset(buf, 0, sizeof(buf));
5601 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
5602 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5604 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
5609 /* By default, chip supports single chan concurrency,
5610 * now lets check for mchan
5612 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
5613 if (mchan_supported)
5614 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5616 /* For customer_hw4, although ICS,
5617 * we still support concurrent mode
#ifdef SUPPORT_AP_POWERSAVE
/* rxchain power-save tunables pushed to the firmware when enabling. */
5631 #define RXCHAIN_PWRSAVE_PPS 10
5632 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
5633 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
/* Enable or disable AP (rxchain) power save on interface ifidx.  When
 * enabling, also program packets-per-second threshold, quiet time and
 * the associated-STA check via their respective iovars.  Failures are
 * logged but not propagated individually. */
5634 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
5637 int32 pps = RXCHAIN_PWRSAVE_PPS;
5638 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
5639 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
5642 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5643 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5644 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5645 DHD_ERROR(("Failed to enable AP power save\n"));
5647 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
5648 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5649 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5650 DHD_ERROR(("Failed to set pps\n"));
5652 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
5653 4, iovbuf, sizeof(iovbuf));
5654 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5655 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5656 DHD_ERROR(("Failed to set quiet time\n"));
5658 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
5659 4, iovbuf, sizeof(iovbuf));
5660 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5661 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5662 DHD_ERROR(("Failed to set stas assoc check\n"));
/* Disable path: just clear rxchain_pwrsave_enable. */
5665 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5666 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5667 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5668 DHD_ERROR(("Failed to disable AP power save\n"));
#endif /* SUPPORT_AP_POWERSAVE */
#if defined(READ_CONFIG_FROM_FILE)
#include <linux/fs.h>
#include <linux/ctype.h>
/* Map the userland name onto the BCM utility implementation. */
5681 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
/* TRUE when the config file controls PM; cleared by the "PM" handler in
 * dhd_preinit_proc() (see g_pm_control there) — TODO confirm relation. */
5682 bool PM_control = TRUE;
/* Apply one "name=value" pair from the on-disk config file to the
 * firmware.  Known names (country, roam_*, PM, btamp_chan, band,
 * cur_etheraddr, lpc, vht_features) get dedicated handling; any other
 * name falls through to a generic integer iovar set.  Returns the
 * result of the underlying dhd_wl_ioctl_cmd() call.
 */
5684 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
5687 wl_country_t cspec = {{0}, -1, {0}};
5689 char *endptr = NULL;
5691 char smbuf[WLC_IOCTL_SMLEN*2];
/* "country" accepts "CC" or "CC/rev"; without an explicit rev the
 * platform-customized country code is looked up. */
5693 if (!strcmp(name, "country")) {
5694 revstr = strchr(value, '/');
5696 cspec.rev = strtoul(revstr + 1, &endptr, 10);
5697 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5698 cspec.country_abbrev[2] = '\0';
5699 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
5702 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5703 memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
5704 get_customized_country_code(dhd->info->adapter,
5705 (char *)&cspec.country_abbrev, &cspec);
5707 memset(smbuf, 0, sizeof(smbuf));
5708 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
5709 cspec.country_abbrev, cspec.rev));
5710 iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
5711 smbuf, sizeof(smbuf));
5712 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5713 smbuf, iolen, TRUE, 0);
5714 } else if (!strcmp(name, "roam_scan_period")) {
5715 var_int = (int)simple_strtol(value, NULL, 0);
5716 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
5717 &var_int, sizeof(var_int), TRUE, 0);
5718 } else if (!strcmp(name, "roam_delta")) {
5723 x.val = (int)simple_strtol(value, NULL, 0);
5724 /* x.band = WLC_BAND_AUTO; */
5725 x.band = WLC_BAND_ALL;
5726 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
5727 } else if (!strcmp(name, "roam_trigger")) {
5730 roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
5731 roam_trigger[1] = WLC_BAND_ALL;
5732 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
5733 sizeof(roam_trigger), TRUE, 0);
/* "PM" also records whether the host may override power management
 * (g_pm_control) when CONFIG_PM_LOCK builds are in effect. */
5736 } else if (!strcmp(name, "PM")) {
5738 var_int = (int)simple_strtol(value, NULL, 0);
5740 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
5741 &var_int, sizeof(var_int), TRUE, 0);
5743 #if defined(CONFIG_PM_LOCK)
5745 g_pm_control = TRUE;
5746 printk("%s var_int=%d don't control PM\n", __func__, var_int);
5748 g_pm_control = FALSE;
5749 printk("%s var_int=%d do control PM\n", __func__, var_int);
5756 else if (!strcmp(name, "btamp_chan")) {
5762 btamp_chan = (int)simple_strtol(value, NULL, 0);
5763 iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
5764 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
5765 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
5766 __FUNCTION__, btamp_chan, ret));
5768 DHD_ERROR(("%s btamp_chan %d set success\n",
5769 __FUNCTION__, btamp_chan));
#endif /* WLBTAMP */
/* "band": textual band selector mapped to WLC_BAND_* constants;
 * unrecognized values fall back to AUTO with a console warning. */
5772 else if (!strcmp(name, "band")) {
5774 if (!strcmp(value, "auto"))
5775 var_int = WLC_BAND_AUTO;
5776 else if (!strcmp(value, "a"))
5777 var_int = WLC_BAND_5G;
5778 else if (!strcmp(value, "b"))
5779 var_int = WLC_BAND_2G;
5780 else if (!strcmp(value, "all"))
5781 var_int = WLC_BAND_ALL;
5783 printk(" set band value should be one of the a or b or all\n");
5784 var_int = WLC_BAND_AUTO;
5786 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
5787 sizeof(var_int), TRUE, 0)) < 0)
5788 printk(" set band err=%d\n", ret);
/* "cur_etheraddr": change the device MAC unless it already matches. */
5790 } else if (!strcmp(name, "cur_etheraddr")) {
5791 struct ether_addr ea;
5796 bcm_ether_atoe(value, &ea);
5798 ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
5800 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
5804 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
5805 ea.octet[0], ea.octet[1], ea.octet[2],
5806 ea.octet[3], ea.octet[4], ea.octet[5]));
5808 iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
5810 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
5812 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
/* Keep the host-side cached MAC in sync with the firmware. */
5816 memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
/* "lpc" and "vht_features" need the interface down while setting. */
5819 } else if (!strcmp(name, "lpc")) {
5823 var_int = (int)simple_strtol(value, NULL, 0);
5824 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5825 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5827 iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
5828 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5829 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
5831 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5832 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
5835 } else if (!strcmp(name, "vht_features")) {
5839 var_int = (int)simple_strtol(value, NULL, 0);
5841 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5842 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5844 iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
5845 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5846 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
5848 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5849 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
/* Fallback: treat the value as an int and set it as an iovar named
 * exactly like the key.  "roam_off" additionally arms bcn_timeout. */
5854 char iovbuf[WLC_IOCTL_SMLEN];
5856 /* wlu_iovar_setint */
5857 var_int = (int)simple_strtol(value, NULL, 0);
5859 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
5860 if (!strcmp(name, "roam_off")) {
5861 /* Setup timeout if Beacons are lost to report link down */
5863 uint bcn_timeout = 2;
5864 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
5865 iovbuf, sizeof(iovbuf));
5866 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5869 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
5871 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
5873 iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
5874 iovbuf, sizeof(iovbuf));
5875 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5876 iovbuf, iovlen, TRUE, 0);
/* Read the config file at CONFIG_BCMDHD_CONFIG_PATH, split it into
 * whitespace-delimited "name=value" tokens and feed each pair to
 * dhd_preinit_proc().  The whole file is slurped into one heap buffer
 * (stat.size + 1, NUL-terminated) which is freed before returning.
 */
5882 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
5884 mm_segment_t old_fs;
5886 struct file *fp = NULL;
5888 char *buf = NULL, *p, *name, *value;
5892 config_path = CONFIG_BCMDHD_CONFIG_PATH;
5896 printk(KERN_ERR "config_path can't read. \n");
/* Need the file size up front to size the read buffer. */
5902 if ((ret = vfs_stat(config_path, &stat))) {
5904 printk(KERN_ERR "%s: Failed to get information (%d)\n",
5910 if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
5911 printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
5915 printk("dhd_preinit_config : config path : %s \n", config_path);
5917 if (!(fp = dhd_os_open_image(config_path)) ||
5918 (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
5921 buf[stat.size] = '\0';
/* Tokenize in place: 'name' then 'value' are NUL-terminated slices of
 * buf, advancing past whitespace between pairs. */
5922 for (p = buf; *p; p++) {
5925 for (name = p++; *p && !isspace(*p); p++) {
5929 for (value = p; *p && !isspace(*p); p++);
5931 if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
5932 printk(KERN_ERR "%s: %s=%s\n",
5933 bcmerrorstr(ret), name, value);
/* Cleanup: close the image handle and release the read buffer. */
5943 dhd_os_close_image(fp);
5945 MFREE(dhd->osh, buf, stat.size+1);
5955 dhd_preinit_ioctls(dhd_pub_t *dhd)
5958 char eventmask[WL_EVENTING_MASK_LEN];
5959 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
5960 uint32 buf_key_b4_m4 = 1;
5965 eventmsgs_ext_t *eventmask_msg = NULL;
5966 char* iov_buf = NULL;
5969 aibss_bcn_force_config_t bcn_config;
5973 #endif /* WLAIBSS_PS */
5974 #endif /* WLAIBSS */
5975 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5978 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5979 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5980 uint32 ampdu_ba_wsize = 0;
5981 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5982 #if defined(CUSTOM_AMPDU_MPDU)
5983 int32 ampdu_mpdu = 0;
5985 #if defined(CUSTOM_AMPDU_RELEASE)
5986 int32 ampdu_release = 0;
5988 #if defined(CUSTOM_AMSDU_AGGSF)
5989 int32 amsdu_aggsf = 0;
5992 #if defined(BCMSDIO)
5993 #ifdef PROP_TXSTATUS
5994 int wlfc_enable = TRUE;
5996 uint32 hostreorder = 1;
5998 #endif /* DISABLE_11N */
5999 #endif /* PROP_TXSTATUS */
6001 #ifdef PCIE_FULL_DONGLE
6002 uint32 wl_ap_isolate;
6003 #endif /* PCIE_FULL_DONGLE */
6005 #ifdef DHD_ENABLE_LPC
6007 #endif /* DHD_ENABLE_LPC */
6008 uint power_mode = PM_FAST;
6009 uint32 dongle_align = DHD_SDALIGN;
6010 #if defined(BCMSDIO)
6011 uint32 glom = CUSTOM_GLOM_SETTING;
6012 #endif /* defined(BCMSDIO) */
6013 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6016 uint bcn_timeout = dhd->conf->bcn_timeout;
6018 #if defined(ARP_OFFLOAD_SUPPORT)
6021 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
6022 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
6023 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
6024 char buf[WLC_IOCTL_SMLEN];
6026 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
6029 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
6030 int roam_scan_period[2] = {10, WLC_BAND_ALL};
6031 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
6032 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6033 int roam_fullscan_period = 60;
6034 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6035 int roam_fullscan_period = 120;
6036 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6038 #ifdef DISABLE_BUILTIN_ROAM
6040 #endif /* DISABLE_BUILTIN_ROAM */
6041 #endif /* ROAM_ENABLE */
6046 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6047 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
6048 struct ether_addr p2p_ea;
6053 #ifdef SOFTAP_UAPSD_OFF
6054 uint32 wme_apsd = 0;
6055 #endif /* SOFTAP_UAPSD_OFF */
6056 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6057 uint32 apsta = 1; /* Enable APSTA mode */
6058 #elif defined(SOFTAP_AND_GC)
6061 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6062 #ifdef GET_CUSTOM_MAC_ENABLE
6063 struct ether_addr ea_addr;
6064 #endif /* GET_CUSTOM_MAC_ENABLE */
6068 #endif /* DISABLE_11N */
6070 #if defined(DISABLE_11AC)
6072 #endif /* DISABLE_11AC */
6075 #endif /* USE_WL_TXBF */
6076 #ifdef AMPDU_VO_ENABLE
6077 struct ampdu_tid_control tid;
6079 #ifdef USE_WL_FRAMEBURST
6080 uint32 frameburst = 1;
6081 #endif /* USE_WL_FRAMEBURST */
6082 #ifdef DHD_SET_FW_HIGHSPEED
6083 uint32 ack_ratio = 250;
6084 uint32 ack_ratio_depth = 64;
6085 #endif /* DHD_SET_FW_HIGHSPEED */
6086 #ifdef SUPPORT_2G_VHT
6087 uint32 vht_features = 0x3; /* 2G enable | rates all */
6088 #endif /* SUPPORT_2G_VHT */
6089 #ifdef CUSTOM_PSPRETEND_THR
6090 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
6092 #ifdef PKT_FILTER_SUPPORT
6093 dhd_pkt_filter_enable = TRUE;
6094 #endif /* PKT_FILTER_SUPPORT */
6096 dhd->tdls_enable = FALSE;
6098 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
6099 DHD_TRACE(("Enter %s\n", __FUNCTION__));
6101 dhd_conf_set_band(dhd);
6102 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
6103 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
6106 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6107 (op_mode == DHD_FLAG_MFG_MODE)) {
6108 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6109 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
6110 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6114 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
6115 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
6117 #ifdef GET_CUSTOM_MAC_ENABLE
6118 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
6120 memset(buf, 0, sizeof(buf));
6121 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
6122 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6124 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
6125 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
6129 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
6131 #endif /* GET_CUSTOM_MAC_ENABLE */
6132 /* Get the default device MAC address directly from firmware */
6133 memset(buf, 0, sizeof(buf));
6134 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
6135 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
6137 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
6141 /* Update public MAC address after reading from Firmware */
6142 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
6144 #ifdef GET_CUSTOM_MAC_ENABLE
6146 #endif /* GET_CUSTOM_MAC_ENABLE */
6148 /* get a capabilities from firmware */
6149 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
6150 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
6151 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
6152 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
6153 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6154 __FUNCTION__, ret));
6157 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
6158 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
6159 #ifdef SET_RANDOM_MAC_SOFTAP
6162 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
6163 #if defined(ARP_OFFLOAD_SUPPORT)
6166 #ifdef PKT_FILTER_SUPPORT
6167 dhd_pkt_filter_enable = FALSE;
6169 #ifdef SET_RANDOM_MAC_SOFTAP
6170 SRANDOM32((uint)jiffies);
6171 rand_mac = RANDOM32();
6172 iovbuf[0] = 0x02; /* locally administered bit */
6175 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
6176 iovbuf[4] = (unsigned char)(rand_mac >> 8);
6177 iovbuf[5] = (unsigned char)(rand_mac >> 16);
6179 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
6180 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6182 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6184 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
6185 #endif /* SET_RANDOM_MAC_SOFTAP */
6186 #if !defined(AP) && defined(WL_CFG80211)
6187 /* Turn off MPC in AP mode */
6188 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6189 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6190 sizeof(iovbuf), TRUE, 0)) < 0) {
6191 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
6194 #ifdef SUPPORT_AP_POWERSAVE
6195 dhd_set_ap_powersave(dhd, 0, TRUE);
6197 #ifdef SOFTAP_UAPSD_OFF
6198 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
6199 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6200 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
6201 #endif /* SOFTAP_UAPSD_OFF */
6202 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6203 (op_mode == DHD_FLAG_MFG_MODE)) {
6204 #if defined(ARP_OFFLOAD_SUPPORT)
6206 #endif /* ARP_OFFLOAD_SUPPORT */
6207 #ifdef PKT_FILTER_SUPPORT
6208 dhd_pkt_filter_enable = FALSE;
6209 #endif /* PKT_FILTER_SUPPORT */
6210 dhd->op_mode = DHD_FLAG_MFG_MODE;
6212 uint32 concurrent_mode = 0;
6213 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
6214 (op_mode == DHD_FLAG_P2P_MODE)) {
6215 #if defined(ARP_OFFLOAD_SUPPORT)
6218 #ifdef PKT_FILTER_SUPPORT
6219 dhd_pkt_filter_enable = FALSE;
6221 dhd->op_mode = DHD_FLAG_P2P_MODE;
6222 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
6223 (op_mode == DHD_FLAG_IBSS_MODE)) {
6224 dhd->op_mode = DHD_FLAG_IBSS_MODE;
6226 dhd->op_mode = DHD_FLAG_STA_MODE;
6227 #if !defined(AP) && defined(WLP2P)
6228 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
6229 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
6230 #if defined(ARP_OFFLOAD_SUPPORT)
6233 dhd->op_mode |= concurrent_mode;
6236 /* Check if we are enabling p2p */
6237 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6238 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6239 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6240 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6241 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
6244 #if defined(SOFTAP_AND_GC)
6245 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
6246 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
6247 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
6250 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
6251 ETHER_SET_LOCALADDR(&p2p_ea);
6252 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
6253 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
6254 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6255 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6256 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
6258 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
6262 (void)concurrent_mode;
6266 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
6267 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
6268 /* Set Country code */
6269 if (dhd->dhd_cspec.ccode[0] != 0) {
6270 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
6271 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
6272 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
6273 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6274 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
6276 dhd_conf_set_country(dhd);
6277 dhd_conf_fix_country(dhd);
6279 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
6281 #if defined(DISABLE_11AC)
6282 bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
6283 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6284 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
6285 #endif /* DISABLE_11AC */
6287 /* Set Listen Interval */
6288 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
6289 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6290 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
6292 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
6293 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
6294 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
6295 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6296 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
6297 #if defined(ROAM_ENABLE)
6298 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
6299 sizeof(roam_trigger), TRUE, 0)) < 0)
6300 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
6301 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
6302 sizeof(roam_scan_period), TRUE, 0)) < 0)
6303 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
6304 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
6305 sizeof(roam_delta), TRUE, 0)) < 0)
6306 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
6307 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
6308 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6309 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
6310 #endif /* ROAM_ENABLE */
6311 dhd_conf_set_roam(dhd);
6314 bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
6315 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6318 /* by default TDLS on and auto mode off */
6319 _dhd_tdls_enable(dhd, true, false, NULL);
6322 #ifdef DHD_ENABLE_LPC
6324 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
6325 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6326 sizeof(iovbuf), TRUE, 0)) < 0) {
6327 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
6329 #endif /* DHD_ENABLE_LPC */
6330 dhd_conf_set_lpc(dhd);
6332 /* Set PowerSave mode */
6333 if (dhd->conf->pm >= 0)
6334 power_mode = dhd->conf->pm;
6335 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
6337 /* Match Host and Dongle rx alignment */
6338 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
6339 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6341 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6342 /* enable credall to reduce the chance of no bus credit happened. */
6343 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
6344 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6347 #if defined(BCMSDIO)
6348 if (glom != DEFAULT_GLOM_VALUE) {
6349 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
6350 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6351 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6353 #endif /* defined(BCMSDIO) */
6354 dhd_conf_set_bus_txglom(dhd);
6356 /* Setup timeout if Beacons are lost and roam is off to report link down */
6357 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
6358 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6359 /* Setup assoc_retry_max count to reconnect target AP in dongle */
6360 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
6361 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6362 #if defined(AP) && !defined(WLP2P)
6363 /* Turn off MPC in AP mode */
6364 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6365 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6366 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6367 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6368 #endif /* defined(AP) && !defined(WLP2P) */
6369 dhd_conf_set_mimo_bw_cap(dhd);
6370 dhd_conf_force_wme(dhd);
6371 dhd_conf_set_stbc(dhd);
6372 dhd_conf_set_srl(dhd);
6373 dhd_conf_set_lrl(dhd);
6374 dhd_conf_set_spect(dhd);
6377 if (ap_fw_loaded == TRUE) {
6378 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
6382 #if defined(KEEP_ALIVE)
6384 /* Set Keep Alive : be sure to use FW with -keepalive */
6388 if (ap_fw_loaded == FALSE)
6390 if (!(dhd->op_mode &
6391 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
6392 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
6393 DHD_ERROR(("%s set keeplive failed %d\n",
6394 __FUNCTION__, res));
6397 #endif /* defined(KEEP_ALIVE) */
6400 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
6401 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6402 sizeof(iovbuf), TRUE, 0)) < 0) {
6403 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
6405 #endif /* USE_WL_TXBF */
6406 dhd_conf_set_txbf(dhd);
6407 #ifdef USE_WL_FRAMEBURST
6408 /* Set frameburst to value */
6409 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
6410 sizeof(frameburst), TRUE, 0)) < 0) {
6411 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
6413 #endif /* USE_WL_FRAMEBURST */
6414 dhd_conf_set_frameburst(dhd);
6415 #ifdef DHD_SET_FW_HIGHSPEED
6417 bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
6418 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6419 sizeof(iovbuf), TRUE, 0)) < 0) {
6420 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
6423 /* Set ack_ratio_depth */
6424 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
6425 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6426 sizeof(iovbuf), TRUE, 0)) < 0) {
6427 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
6429 #endif /* DHD_SET_FW_HIGHSPEED */
6430 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6431 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6432 /* Set ampdu ba wsize to 64 or 16 */
6433 #ifdef CUSTOM_AMPDU_BA_WSIZE
6434 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
6436 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
6437 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
6438 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
6439 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
6440 if (ampdu_ba_wsize != 0) {
6441 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
6442 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6443 sizeof(iovbuf), TRUE, 0)) < 0) {
6444 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
6445 __FUNCTION__, ampdu_ba_wsize, ret));
6448 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6449 dhd_conf_set_ampdu_ba_wsize(dhd);
6451 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
6452 if (iov_buf == NULL) {
6453 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
6458 /* Configure custom IBSS beacon transmission */
6459 if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
6462 bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
6463 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6464 sizeof(iovbuf), TRUE, 0)) < 0) {
6465 DHD_ERROR(("%s Set aibss to %d failed %d\n",
6466 __FUNCTION__, aibss, ret));
6470 bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
6471 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6472 sizeof(iovbuf), TRUE, 0)) < 0) {
6473 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
6474 __FUNCTION__, aibss, ret));
6476 #endif /* WLAIBSS_PS */
6478 memset(&bcn_config, 0, sizeof(bcn_config));
6479 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
6480 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
6481 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
6482 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
6483 bcn_config.len = sizeof(bcn_config);
6485 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
6486 sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
6487 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
6488 WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6489 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
6490 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
6491 AIBSS_BCN_FLOOD_DUR, ret));
6493 #endif /* WLAIBSS */
6495 #if defined(CUSTOM_AMPDU_MPDU)
6496 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
6497 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
6498 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
6499 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6500 sizeof(iovbuf), TRUE, 0)) < 0) {
6501 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
6502 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
6505 #endif /* CUSTOM_AMPDU_MPDU */
6507 #if defined(CUSTOM_AMPDU_RELEASE)
6508 ampdu_release = CUSTOM_AMPDU_RELEASE;
6509 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
6510 bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
6511 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6512 sizeof(iovbuf), TRUE, 0)) < 0) {
6513 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
6514 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
6517 #endif /* CUSTOM_AMPDU_RELEASE */
6519 #if defined(CUSTOM_AMSDU_AGGSF)
6520 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
6521 if (amsdu_aggsf != 0) {
6522 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
6523 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6524 sizeof(iovbuf), TRUE, 0)) < 0) {
6525 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
6526 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
6529 #endif /* CUSTOM_AMSDU_AGGSF */
6531 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6532 /* Read 4-way handshake requirements */
6533 if (dhd_use_idsup == 1) {
6534 bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
6535 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
6536 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
6537 * in-dongle supplicant.
6539 if (ret >= 0 || ret == BCME_NOTREADY)
6540 dhd->fw_4way_handshake = TRUE;
6541 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
6543 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
6544 #ifdef SUPPORT_2G_VHT
6545 bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
6546 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6547 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
6549 #endif /* SUPPORT_2G_VHT */
6550 #ifdef CUSTOM_PSPRETEND_THR
6551 /* Turn off MPC in AP mode */
6552 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
6553 iovbuf, sizeof(iovbuf));
6554 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6555 sizeof(iovbuf), TRUE, 0)) < 0) {
6556 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
6557 __FUNCTION__, ret));
6561 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
6562 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6563 sizeof(iovbuf), TRUE, 0)) < 0) {
6564 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
6567 /* Read event_msgs mask */
6568 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6569 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
6570 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
6573 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
6575 /* Setup event_msgs */
6576 setbit(eventmask, WLC_E_SET_SSID);
6577 setbit(eventmask, WLC_E_PRUNE);
6578 setbit(eventmask, WLC_E_AUTH);
6579 setbit(eventmask, WLC_E_AUTH_IND);
6580 setbit(eventmask, WLC_E_ASSOC);
6581 setbit(eventmask, WLC_E_REASSOC);
6582 setbit(eventmask, WLC_E_REASSOC_IND);
6583 setbit(eventmask, WLC_E_DEAUTH);
6584 setbit(eventmask, WLC_E_DEAUTH_IND);
6585 setbit(eventmask, WLC_E_DISASSOC_IND);
6586 setbit(eventmask, WLC_E_DISASSOC);
6587 setbit(eventmask, WLC_E_JOIN);
6588 setbit(eventmask, WLC_E_START);
6589 setbit(eventmask, WLC_E_ASSOC_IND);
6590 setbit(eventmask, WLC_E_PSK_SUP);
6591 setbit(eventmask, WLC_E_LINK);
6592 setbit(eventmask, WLC_E_NDIS_LINK);
6593 setbit(eventmask, WLC_E_MIC_ERROR);
6594 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
6595 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
6597 setbit(eventmask, WLC_E_PMKID_CACHE);
6598 setbit(eventmask, WLC_E_TXFAIL);
6600 setbit(eventmask, WLC_E_JOIN_START);
6601 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
6603 setbit(eventmask, WLC_E_HTSFSYNC);
6604 #endif /* WLMEDIA_HTSF */
6606 setbit(eventmask, WLC_E_PFN_NET_FOUND);
6607 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
6608 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
6609 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
6610 #endif /* PNO_SUPPORT */
6611 /* enable dongle roaming event */
6612 setbit(eventmask, WLC_E_ROAM);
6613 setbit(eventmask, WLC_E_BSSID);
6615 setbit(eventmask, WLC_E_ADDTS_IND);
6616 setbit(eventmask, WLC_E_DELTS_IND);
6619 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
6622 setbit(eventmask, WLC_E_ESCAN_RESULT);
6623 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6624 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
6625 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
6627 #endif /* WL_CFG80211 */
6629 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
6630 #endif /* WLAIBSS */
6631 #ifdef CUSTOMER_HW10
6632 clrbit(eventmask, WLC_E_TRACE);
6634 setbit(eventmask, WLC_E_TRACE);
6636 /* Write updated Event mask */
6637 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6638 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6639 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
6643 /* make up event mask ext message iovar for event larger than 128 */
6644 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
6645 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
6646 if (eventmask_msg == NULL) {
6647 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
6651 bzero(eventmask_msg, msglen);
6652 eventmask_msg->ver = EVENTMSGS_VER;
6653 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6655 /* Read event_msgs_ext mask */
6656 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
6657 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
6658 if (ret2 != BCME_UNSUPPORTED)
6660 if (ret2 == 0) { /* event_msgs_ext must be supported */
6661 bcopy(iov_buf, eventmask_msg, msglen);
6663 #ifdef BT_WIFI_HANDOVER
6664 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
6665 #endif /* BT_WIFI_HANDOVER */
6667 /* Write updated Event mask */
6668 eventmask_msg->ver = EVENTMSGS_VER;
6669 eventmask_msg->command = EVENTMSGS_SET_MASK;
6670 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6671 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
6672 msglen, iov_buf, WLC_IOCTL_SMLEN);
6673 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6674 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6675 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
6678 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
6679 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
6681 } /* unsupported is ok */
6683 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
6684 sizeof(scan_assoc_time), TRUE, 0);
6685 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
6686 sizeof(scan_unassoc_time), TRUE, 0);
6687 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
6688 sizeof(scan_passive_time), TRUE, 0);
6690 #ifdef ARP_OFFLOAD_SUPPORT
6691 /* Set and enable ARP offload feature for STA only */
6693 if (arpoe && !ap_fw_loaded)
6698 dhd_arp_offload_enable(dhd, TRUE);
6699 dhd_arp_offload_set(dhd, dhd_arp_mode);
6701 dhd_arp_offload_enable(dhd, FALSE);
6702 dhd_arp_offload_set(dhd, 0);
6704 dhd_arp_enable = arpoe;
6705 #endif /* ARP_OFFLOAD_SUPPORT */
6707 #ifdef PKT_FILTER_SUPPORT
6708 /* Setup default defintions for pktfilter , enable in suspend */
6709 dhd->pktfilter_count = 6;
6710 /* Setup filter to allow only unicast */
6711 if (dhd_master_mode) {
6712 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
6713 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
6714 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
6715 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
6716 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6717 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6718 /* apply APP pktfilter */
6719 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
6721 dhd_conf_discard_pkt_filter(dhd);
6722 dhd_conf_add_pkt_filter(dhd);
6726 dhd_enable_packet_filter(0, dhd);
6728 #endif /* defined(SOFTAP) */
6729 dhd_set_packet_filter(dhd);
6730 #endif /* PKT_FILTER_SUPPORT */
6732 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
6733 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6734 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
6735 #endif /* DISABLE_11N */
6737 #ifdef AMPDU_VO_ENABLE
6738 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
6740 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6741 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6743 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
6745 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6746 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6748 #if defined(SOFTAP_TPUT_ENHANCE)
6749 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6750 dhd_bus_setidletime(dhd, (int)100);
6751 #ifdef DHDTCPACK_SUPPRESS
6752 dhd->tcpack_sup_enabled = FALSE;
6754 #if defined(DHD_TCP_WINSIZE_ADJUST)
6755 dhd_use_tcp_window_size_adjust = TRUE;
6758 memset(buf, 0, sizeof(buf));
6759 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
6760 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
6762 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6763 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6768 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
6770 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6774 #endif /* SOFTAP_TPUT_ENHANCE */
6776 /* query for 'ver' to get version info from firmware */
6777 memset(buf, 0, sizeof(buf));
6779 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
6780 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
6781 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6783 bcmstrtok(&ptr, "\n", 0);
6784 /* Print fw version info */
6785 DHD_ERROR(("Firmware version = %s\n", buf));
6786 dhd_set_version_info(dhd, buf);
6789 #if defined(BCMSDIO)
6790 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
6791 #endif /* defined(BCMSDIO) */
6793 dhd_conf_set_disable_proptx(dhd);
6794 #if defined(BCMSDIO)
6795 #ifdef PROP_TXSTATUS
6796 if (disable_proptx ||
6797 #ifdef PROP_TXSTATUS_VSDB
6798 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6799 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
6800 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
6801 #endif /* PROP_TXSTATUS_VSDB */
6803 wlfc_enable = FALSE;
6807 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
6808 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
6809 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6810 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
6811 if (ret2 != BCME_UNSUPPORTED)
6813 if (ret2 != BCME_OK)
6816 #endif /* DISABLE_11N */
6818 #ifdef READ_CONFIG_FROM_FILE
6819 dhd_preinit_config(dhd, 0);
6820 #endif /* READ_CONFIG_FROM_FILE */
6825 else if (hostreorder)
6826 dhd_wlfc_hostreorder_init(dhd);
6827 #endif /* DISABLE_11N */
6829 #endif /* PROP_TXSTATUS */
6830 #endif /* BCMSDIO || BCMBUS */
6831 #ifdef PCIE_FULL_DONGLE
6832 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6833 if (FW_SUPPORTED(dhd, ap)) {
6834 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
6835 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
6836 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6837 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6839 #endif /* PCIE_FULL_DONGLE */
6841 if (!dhd->pno_state) {
6846 dhd_interworking_enable(dhd);
6849 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
6855 kfree(eventmask_msg);
6864 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
/*
 * Generic firmware iovar accessor: packs "name" plus cmd_len bytes of
 * payload into a local buffer with bcm_mkiovar(), then issues a single
 * WLC_SET_VAR or WLC_GET_VAR ioctl on interface ifidx.  For a get
 * (set == 0) that succeeds, the response is copied back into cmd_buf.
 * NOTE(review): several lines (ioc field setup, return) are elided in
 * this excerpt.
 */
/* VLA sized to hold the NUL-terminated iovar name plus the payload. */
6866 char buf[strlen(name) + 1 + cmd_len];
6867 int len = sizeof(buf);
/* bcm_mkiovar() returns the actual packed length (name + NUL + payload). */
6871 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
6873 memset(&ioc, 0, sizeof(ioc));
/* Choose set vs. get variant of the variable ioctl. */
6875 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
6880 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
/* On a successful get, hand the firmware's answer back to the caller. */
6881 if (!set && ret >= 0)
6882 memcpy(cmd_buf, buf, cmd_len);
/*
 * Validate and apply a new MTU on interface ifidx.  Rejects the change
 * while the netdev is running and enforces the DHD_MIN_MTU..DHD_MAX_MTU
 * range.  NOTE(review): the tail of the function (the actual MTU
 * assignment and return paths) is elided in this excerpt.
 */
6887 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
6889 struct dhd_info *dhd = dhdp->info;
6890 struct net_device *dev = NULL;
6892 ASSERT(dhd && dhd->iflist[ifidx]);
6893 dev = dhd->iflist[ifidx]->net;
/* MTU may only be changed while the interface is administratively down. */
6896 if (netif_running(dev)) {
6897 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
6898 return BCME_NOTDOWN;
/* Driver-imposed MTU bounds (1752 leaves headroom for dongle headers). */
6901 #define DHD_MIN_MTU 1500
6902 #define DHD_MAX_MTU 1752
6904 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
6905 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
6913 #ifdef ARP_OFFLOAD_SUPPORT
6914 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * Add or remove one IPv4 address (ipa) in the dongle's ARP-offload
 * host-IP table for interface idx.  Strategy: read the current table
 * into a local cache, clear the dongle table, edit the cache (insert
 * into first free slot on add, zero the matching slot on remove), then
 * write every non-zero cached entry back.  NOTE(review): several lines
 * (early return on read failure, loop braces) are elided in this excerpt.
 */
6916 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
6918 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
6922 bzero(ipv4_buf, sizeof(ipv4_buf));
6924 /* display what we've got */
6925 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6926 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
6928 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6930 /* host_ip table is now cached locally; clear it in the dongle AOE */
6931 dhd_aoe_hostip_clr(dhd_pub, idx);
6934 DHD_ERROR(("%s failed\n", __FUNCTION__));
/* Edit the cached table, then push surviving entries back to the dongle. */
6938 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
/* Add: claim the first empty slot, then stop looking for one. */
6939 if (add && (ipv4_buf[i] == 0)) {
6941 add = FALSE; /* added ipa to local table */
6942 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
/* Remove: zero out the slot that matches ipa (done on the elided line). */
6944 } else if (ipv4_buf[i] == ipa) {
6946 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
6947 __FUNCTION__, ipa, i));
6950 if (ipv4_buf[i] != 0) {
6951 /* add back host_ip entries from our local cache */
6952 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
6953 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
6954 __FUNCTION__, ipv4_buf[i], i));
6958 /* see the resulting hostip table */
6959 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6960 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
6961 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6966 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6967 * whenever there is an event related to an IP address.
6968 * ptr : kernel provided pointer to IP address that has changed
/*
 * IPv4 address notifier callback (registered with the kernel's inetaddr
 * notifier chain).  Keeps the dongle's ARP-offload host-IP table in sync
 * with the host's interface addresses: on address-up it caches/installs
 * the new IP, on address-down it removes it and clears the AOE tables.
 * NOTE(review): the switch on 'event' and its NETDEV_UP/NETDEV_DOWN case
 * labels, plus several returns/braces, are elided in this excerpt — the
 * Up/Down attributions below are inferred from the log strings; confirm
 * against the full source.
 */
6970 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
6971 unsigned long event,
6974 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
/* Nothing to do when ARP offload is globally disabled. */
6980 if (!dhd_arp_enable)
6982 if (!ifa || !(ifa->ifa_dev->dev))
6985 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6986 /* Filter notifications meant for non Broadcom devices */
6987 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
6988 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
6989 #if defined(WL_ENABLE_P2P_IF)
6990 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
6991 #endif /* WL_ENABLE_P2P_IF */
6994 #endif /* LINUX_VERSION_CODE */
6996 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
7000 dhd_pub = &dhd->pub;
7002 if (dhd_pub->arp_version == 1) {
/* Map the notifying net_device back to its DHD interface index. */
7006 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
7007 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
7010 if (idx < DHD_MAX_IFS)
7011 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
7012 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
7014 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
/* Address-up path (case label elided): install IP into AOE cache. */
7021 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7022 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
/* Bus not up yet: remember the address and apply it later. */
7024 if (dhd->pub.busstate != DHD_BUS_DATA) {
7025 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
7026 if (dhd->pend_ipaddr) {
7027 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7028 __FUNCTION__, dhd->pend_ipaddr));
7030 dhd->pend_ipaddr = ifa->ifa_address;
7034 #ifdef AOE_IP_ALIAS_SUPPORT
7035 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7037 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
7038 #endif /* AOE_IP_ALIAS_SUPPORT */
/* Address-down path (case label elided): drop IP and clear AOE state. */
7042 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7043 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7044 dhd->pend_ipaddr = 0;
7045 #ifdef AOE_IP_ALIAS_SUPPORT
7046 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7048 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
7050 dhd_aoe_hostip_clr(&dhd->pub, idx);
7051 dhd_aoe_arp_clr(&dhd->pub, idx);
7052 #endif /* AOE_IP_ALIAS_SUPPORT */
/* Default path for unhandled events.  NOTE(review): "do noting" typo in
 * the log string below — fixing it would change a runtime string. */
7056 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7057 __func__, ifa->ifa_label, event));
7062 #endif /* ARP_OFFLOAD_SUPPORT */
7065 /* Neighbor Discovery Offload: defered handler */
/*
 * Deferred-work handler for IPv6 Neighbor Discovery Offload (NDO).
 * Runs in thread context (scheduled from dhd_inet6addr_notifier_call)
 * and, depending on the queued event, either enables NDO and adds the
 * host IPv6 address to the dongle table, or removes the address and
 * disables NDO.  NOTE(review): the switch case labels (presumably
 * NETDEV_UP / NETDEV_DOWN), early returns, and braces are elided in
 * this excerpt; frees ndo_work at the end (comment at 7127).
 */
7067 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
7069 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
7070 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
/* Sanity: this handler only services the IPV6_NDO work type. */
7073 if (event != DHD_WQ_WORK_IPV6_NDO) {
7074 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7079 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
7084 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
/* NDO handling is limited to the primary interface (if_idx 0). */
7088 if (ndo_work->if_idx) {
7089 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
7093 switch (ndo_work->event) {
/* Address-up (case label elided): enable NDO, then register the IP. */
7095 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
7096 ret = dhd_ndo_enable(pub, TRUE);
7098 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
7101 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
7103 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7104 __FUNCTION__, ret));
/* Address-down (case label elided): remove the IP, then disable NDO. */
7108 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
7109 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
7111 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7112 __FUNCTION__, ret));
7116 ret = dhd_ndo_enable(pub, FALSE);
7118 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
7123 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
7127 /* free ndo_work. alloced while scheduling the work */
7134 * Neighbor Discovery Offload: Called when an interface
7135 * is assigned with ipv6 address.
7136 * Handles only primary interface
/*
 * IPv6 address notifier callback (inet6addr notifier chain).  Filters
 * out non-DHD devices, checks firmware NDO support, then packages the
 * event + address into a kzalloc'd ipv6_work_info_t and defers the
 * actual dongle programming to dhd_inet6_work_handler() on the driver's
 * work queue (this callback may run in atomic context, hence GFP_ATOMIC
 * and the deferral).  NOTE(review): early-return lines are elided in
 * this excerpt.
 */
7138 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
7139 unsigned long event,
7144 struct inet6_ifaddr *inet6_ifa = ptr;
7145 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
7146 struct ipv6_work_info_t *ndo_info;
/* Only the primary interface is handled (see header comment at 7136). */
7147 int idx = 0; /* REVISIT */
7149 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7150 /* Filter notifications meant for non Broadcom devices */
7151 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
7154 #endif /* LINUX_VERSION_CODE */
7156 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev)
7160 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
7162 dhd_pub = &dhd->pub;
/* Skip entirely when the firmware lacks the NDO ("ndoe") capability. */
7163 if (!FW_SUPPORTED(dhd_pub, ndoe))
/* GFP_ATOMIC: notifier may be called from a non-sleepable context. */
7166 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
7168 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
7172 ndo_info->event = event;
7173 ndo_info->if_idx = idx;
7174 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
7176 /* defer the work to thread as it may block kernel */
7177 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
7178 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
7181 #endif /* #ifdef CONFIG_IPV6 */
/*
 * Create and register the Linux net_device for DHD interface ifidx:
 * wires up the netdev ops (kernel-version dependent), derives the MAC
 * address (dongle MAC for the primary interface; per-interface MAC with
 * the locally-administered bit forced for clones of the primary MAC),
 * sets header length / ethtool / wireless-ext handlers, and calls
 * register_netdev()/register_netdevice() depending on need_rtnl_lock.
 * NOTE(review): many interleaved lines (declarations, braces, returns,
 * error-path labels) are elided in this excerpt.
 */
7184 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
7186 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7188 struct net_device *net = NULL;
/* Fallback MAC used only until a real address is available. */
7190 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
7192 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
7194 ASSERT(dhd && dhd->iflist[ifidx]);
7195 ifp = dhd->iflist[ifidx];
7197 ASSERT(net && (ifp->idx == ifidx));
/* Virtual-interface ops: pre-2.6.31 kernels use individual function
 * pointers on net_device; newer kernels use a net_device_ops struct. */
7200 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7202 net->get_stats = dhd_get_stats;
7203 net->do_ioctl = dhd_ioctl_entry;
7204 net->hard_start_xmit = dhd_start_xmit;
7205 net->set_mac_address = dhd_set_mac_address;
7206 net->set_multicast_list = dhd_set_multicast_list;
7207 net->open = net->stop = NULL;
7209 ASSERT(!net->netdev_ops);
7210 net->netdev_ops = &dhd_ops_virt;
7211 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7213 net->netdev_ops = &dhd_cfgp2p_ops_virt;
7214 #endif /* P2PONEINT */
7216 /* Ok, link into the network layer... */
7219 * device functions for the primary interface only
7221 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7222 net->open = dhd_open;
7223 net->stop = dhd_stop;
7225 net->netdev_ops = &dhd_ops_pri;
7226 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
/* Primary interface: use the dongle's MAC if one has been set. */
7227 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
7228 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7231 * We have to use the primary MAC for virtual interfaces
7233 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
7235 * Android sets the locally administered bit to indicate that this is a
7236 * portable hotspot. This will not work in simultaneous AP/STA mode,
7237 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
/* Virtual interface cloned the primary MAC: flip the locally-
 * administered bit to keep the addresses distinct. */
7239 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
7241 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
7242 __func__, net->name));
7243 temp_addr[0] |= 0x02;
/* Reserve headroom for the dongle's bus/protocol header. */
7247 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
7248 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
7249 net->ethtool_ops = &dhd_ethtool_ops;
7250 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
7252 #if defined(WL_WIRELESS_EXT)
7253 #if WIRELESS_EXT < 19
7254 net->get_wireless_stats = dhd_get_wireless_stats;
7255 #endif /* WIRELESS_EXT < 19 */
7256 #if WIRELESS_EXT > 12
7257 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
7258 #endif /* WIRELESS_EXT > 12 */
7259 #endif /* defined(WL_WIRELESS_EXT) */
7261 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
7263 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
7266 printf("%s\n", dhd_version);
/* need_rtnl_lock selects between the locking and lock-assumed variants
 * (register_netdevice() requires the caller to hold rtnl_lock). */
7269 err = register_netdev(net);
7271 err = register_netdevice(net);
7274 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
7279 err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
7281 DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__, err));
7282 #endif /* SET_RPS_CPUS */
7286 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
7287 MAC2STRDBG(net->dev_addr));
7289 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
7290 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
7293 #if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
7294 KERNEL_VERSION(2, 6, 27))))
/* Signal module-load waiters, then power the chip back down when
 * firmware is not loaded at driver-load time (loaded on ifup instead). */
7297 up(&dhd_registration_sem);
7299 if (!dhd_download_fw_on_driverload) {
7300 dhd_net_bus_devreset(net, TRUE);
7302 dhd_net_bus_suspend(net);
7303 #endif /* BCMLXSDMMC */
7304 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
7307 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
/* Error path (label elided): undo the ops assignment on old kernels. */
7311 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
7314 net->netdev_ops = NULL;
/*
 * dhd_bus_detach() - quiesce the protocol and bus layers for this instance.
 * Skips the stop sequence if the bus is already DOWN (e.g. after dhd_stop),
 * then unregisters the out-of-band interrupt when OOB wake is configured.
 */
7320 dhd_bus_detach(dhd_pub_t *dhdp)
7324 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7327 dhd = (dhd_info_t *)dhdp->info;
7331 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
7332 * calling stop again will cause SD read/write errors.
7334 if (dhd->pub.busstate != DHD_BUS_DOWN) {
7335 /* Stop the protocol module */
7336 dhd_prot_stop(&dhd->pub);
7338 /* Stop the bus module */
7339 dhd_bus_stop(dhd->pub.bus, TRUE);
7342 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7343 dhd_bus_oob_intr_unregister(dhdp);
/*
 * dhd_detach() - full driver-instance teardown, the inverse of dhd_attach().
 * Each cleanup step is gated on the matching DHD_ATTACH_STATE_* bit so that
 * a partially-failed attach can still be unwound safely.  Ordering matters:
 * bus/protocol first, then notifiers, interfaces, timers, threads, cfg80211,
 * deferred work, and finally wakelocks and the config layer.
 */
7350 void dhd_detach(dhd_pub_t *dhdp)
7353 unsigned long flags;
7354 int timer_valid = FALSE;
7359 dhd = (dhd_info_t *)dhdp->info;
7363 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7365 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7367 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
7370 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
7371 /* Give sufficient time for threads to start running in case
7372 * dhd_attach() has failed
7377 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
7378 dhd_bus_detach(dhdp);
7379 #ifdef PCIE_FULL_DONGLE
7380 dhd_flow_rings_deinit(dhdp);
7384 dhd_prot_detach(dhdp);
/* Unhook address-change notifiers before interfaces disappear. */
7387 #ifdef ARP_OFFLOAD_SUPPORT
7388 if (dhd_inetaddr_notifier_registered) {
7389 dhd_inetaddr_notifier_registered = FALSE;
7390 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7392 #endif /* ARP_OFFLOAD_SUPPORT */
7394 if (dhd_inet6addr_notifier_registered) {
7395 dhd_inet6addr_notifier_registered = FALSE;
7396 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7400 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7401 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
7402 if (dhd->early_suspend.suspend)
7403 unregister_early_suspend(&dhd->early_suspend);
7405 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7407 #if defined(WL_WIRELESS_EXT)
7408 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
7409 /* Detach and unlink in the iw */
7412 #endif /* defined(WL_WIRELESS_EXT) */
7414 /* delete all interfaces, start with virtual */
7415 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
7419 /* Cleanup virtual interfaces */
7420 dhd_net_if_lock_local(dhd);
7421 for (i = 1; i < DHD_MAX_IFS; i++) {
7423 dhd_remove_if(&dhd->pub, i, TRUE);
7425 dhd_net_if_unlock_local(dhd);
7427 /* delete primary interface 0 */
7428 ifp = dhd->iflist[0];
7431 if (ifp && ifp->net) {
7435 /* in unregister_netdev case, the interface gets freed by net->destructor
7436 * (which is set to free_netdev)
7438 if (ifp->net->reg_state == NETREG_UNINITIALIZED)
7439 free_netdev(ifp->net);
7442 custom_rps_map_clear(ifp->net->_rx);
7443 #endif /* SET_RPS_CPUS */
7444 unregister_netdev(ifp->net);
7448 dhd_wmf_cleanup(dhdp, 0);
7449 #endif /* DHD_WMF */
7451 dhd_if_del_sta_list(ifp);
7453 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
7454 dhd->iflist[0] = NULL;
7458 /* Clear the watchdog timer */
/* Read-and-clear wd_timer_valid under the general lock, then stop the
 * timer outside the lock (del_timer_sync may sleep waiting for handler).
 */
7459 DHD_GENERAL_LOCK(&dhd->pub, flags);
7460 timer_valid = dhd->wd_timer_valid;
7461 dhd->wd_timer_valid = FALSE;
7462 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7464 del_timer_sync(&dhd->timer);
7466 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
7467 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
7468 PROC_STOP(&dhd->thr_wdt_ctl);
7471 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
7472 PROC_STOP(&dhd->thr_rxf_ctl);
7475 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
7476 PROC_STOP(&dhd->thr_dpc_ctl);
7478 tasklet_kill(&dhd->tasklet);
7481 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
7482 wl_cfg80211_detach(NULL);
7483 dhd_monitor_uninit();
7486 /* free deferred work queue */
7487 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
7488 dhd->dhd_deferred_wq = NULL;
7490 #ifdef SHOW_LOGTRACE
7491 if (dhd->event_data.fmts)
7492 kfree(dhd->event_data.fmts);
7493 if (dhd->event_data.raw_fmts)
7494 kfree(dhd->event_data.raw_fmts);
7495 #endif /* SHOW_LOGTRACE */
7498 if (dhdp->pno_state)
7499 dhd_pno_deinit(dhdp);
7501 #if defined(CONFIG_PM_SLEEP)
7502 if (dhd_pm_notifier_registered) {
7503 unregister_pm_notifier(&dhd_pm_notifier);
7504 dhd_pm_notifier_registered = FALSE;
7506 #endif /* CONFIG_PM_SLEEP */
7507 #ifdef DEBUG_CPU_FREQ
7509 free_percpu(dhd->new_freq);
7510 dhd->new_freq = NULL;
7511 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7513 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
7514 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
7515 #ifdef CONFIG_HAS_WAKELOCK
/* Reset counters before destroying so no late release underflows them. */
7516 dhd->wakelock_counter = 0;
7517 dhd->wakelock_wd_counter = 0;
7518 dhd->wakelock_rx_timeout_enable = 0;
7519 dhd->wakelock_ctrl_timeout_enable = 0;
7520 wake_lock_destroy(&dhd->wl_wifi);
7521 wake_lock_destroy(&dhd->wl_rxwake);
7522 wake_lock_destroy(&dhd->wl_ctrlwake);
7523 wake_lock_destroy(&dhd->wl_wdwake);
7524 #ifdef BCMPCIE_OOB_HOST_WAKE
7525 wake_lock_destroy(&dhd->wl_intrwake);
7526 #endif /* BCMPCIE_OOB_HOST_WAKE */
7527 #endif /* CONFIG_HAS_WAKELOCK */
7533 #ifdef DHDTCPACK_SUPPRESS
7534 /* This will free all MEM allocated for TCPACK SUPPRESS */
7535 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7536 #endif /* DHDTCPACK_SUPPRESS */
7537 dhd_conf_detach(dhdp);
/*
 * dhd_free() - release remaining per-instance memory after dhd_detach().
 * Frees every rx-reorder buffer (size = header + per-index pointer table),
 * drains the station pool, and frees the dhd_info_t itself unless it came
 * from the platform preallocation pool (dhd_os_prealloc).
 */
7542 dhd_free(dhd_pub_t *dhdp)
7545 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7549 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7550 if (dhdp->reorder_bufs[i]) {
7551 reorder_info_t *ptr;
7552 uint32 buf_size = sizeof(struct reorder_info);
7554 ptr = dhdp->reorder_bufs[i];
/* Buffer was allocated with a (max_idx + 1)-entry pointer array appended. */
7556 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7557 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7558 i, ptr->max_idx, buf_size));
7560 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7561 dhdp->reorder_bufs[i] = NULL;
7565 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
7567 dhd = (dhd_info_t *)dhdp->info;
7568 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
7570 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
7571 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
/*
 * dhd_clear() - reset per-instance state without freeing dhd_info_t.
 * Mirrors the reorder-buffer free loop in dhd_free() but clears (rather
 * than finalizes) the station pool, for reuse of the instance.
 */
7577 dhd_clear(dhd_pub_t *dhdp)
7579 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7583 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7584 if (dhdp->reorder_bufs[i]) {
7585 reorder_info_t *ptr;
7586 uint32 buf_size = sizeof(struct reorder_info);
7588 ptr = dhdp->reorder_bufs[i];
7590 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7591 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7592 i, ptr->max_idx, buf_size));
7594 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7595 dhdp->reorder_bufs[i] = NULL;
7599 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
/* dhd_module_cleanup() - unregister the bus and platform driver glue. */
7604 dhd_module_cleanup(void)
7606 printf("%s: Enter\n", __FUNCTION__);
7608 dhd_bus_unregister();
7612 dhd_wifi_platform_unregister_drv();
7613 printf("%s: Exit\n", __FUNCTION__);

/* dhd_module_exit() - module unload hook: cleanup plus reboot-notifier removal. */
7617 dhd_module_exit(void)
7619 dhd_module_cleanup();
7620 unregister_reboot_notifier(&dhd_reboot_notifier);

/*
 * dhd_module_init() - module load entry point.
 * Snapshots the firmware/nvram module-parameter paths (they can be clobbered
 * during a failed attempt), registers the platform driver with up to
 * POWERUP_MAX_RETRY retries, restoring the saved paths before each retry.
 */
7624 dhd_module_init(void)
7627 int retry = POWERUP_MAX_RETRY;
7629 printf("%s: in\n", __FUNCTION__);
7631 DHD_PERIM_RADIO_INIT();
/* strncpy does not guarantee termination; forced NUL on the next line. */
7633 if (firmware_path[0] != '\0') {
7634 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
7635 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7638 if (nvram_path[0] != '\0') {
7639 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
7640 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7644 err = dhd_wifi_platform_register_drv();
7646 register_reboot_notifier(&dhd_reboot_notifier);
7650 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
7651 __FUNCTION__, retry));
/* Restore the original paths that a failed attempt may have altered. */
7652 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
7653 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
7654 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
7655 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
7660 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
7662 printf("%s: Exit err=%d\n", __FUNCTION__, err);
/*
 * dhd_reboot_callback() - reboot-notifier hook; reacts only to SYS_RESTART.
 */
7667 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
7669 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
7670 if (code == SYS_RESTART) {

/* Rockchip platform glue: chip-type query and version strings come from
 * the rfkill-wlan platform layer.
 */
7676 #include <linux/rfkill-wlan.h>
7677 extern int get_wifi_chip_type(void);
7678 extern char WIFI_MODULE_NAME[];
7679 extern char RKWIFI_DRV_VERSION[];
7681 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
/* Kernel thread that defers dhd_module_init() out of the boot path. */
7682 static int wifi_init_thread(void *data)
/*
 * rockchip_wifi_init_module_rkwifi() - Rockchip platform init entry point.
 * When CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP is set, defers the real
 * dhd_module_init() to a kernel thread so boot is not blocked; otherwise
 * calls dhd_module_init() directly.  Returns 0 or dhd_module_init()'s result.
 */
7689 int rockchip_wifi_init_module_rkwifi(void)
7691 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7692 int type = get_wifi_chip_type();
7693 if (type > WIFI_AP6XXX_SERIES) return 0;
7695 printf("=======================================================\n");
7696 printf("==== Launching Wi-Fi driver! (Powered by Rockchip) ====\n");
7697 printf("=======================================================\n");
7698 printf("%s WiFi driver (Powered by Rockchip,Ver %s) init.\n", WIFI_MODULE_NAME, RKWIFI_DRV_VERSION);
7700 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7702 struct task_struct *kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
/* BUGFIX: kthread_run() returns ERR_PTR() on failure, never a valid task
 * with a negative pid.  The old check `kthread->pid < 0` dereferenced the
 * error pointer (crash) and could never detect the failure.
 */
7703 if (IS_ERR(kthread))
7704 printf("create wifi_init_thread failed.\n");
7708 return dhd_module_init();
/*
 * rockchip_wifi_exit_module_rkwifi() - Rockchip platform exit entry point.
 * Skips teardown for chip types outside the AP6xxx series when the
 * boot-time-load config is enabled.
 */
7712 void rockchip_wifi_exit_module_rkwifi(void)
7714 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7715 int type = get_wifi_chip_type();
7716 if (type > WIFI_AP6XXX_SERIES) return;
7718 printf("=======================================================\n");
7719 printf("== Dis-launching Wi-Fi driver! (Powered by Rockchip) ==\n");
7720 printf("=======================================================\n");

/* Module entry/exit wiring: late_initcall when the driver is built into
 * the kernel and loaded at boot; the symbols are exported so an external
 * controller can drive init/exit explicitly.
 */
7724 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
7725 late_initcall(rockchip_wifi_init_module_rkwifi);
7726 module_exit(rockchip_wifi_exit_module_rkwifi);
7728 EXPORT_SYMBOL(rockchip_wifi_init_module_rkwifi);
7729 EXPORT_SYMBOL(rockchip_wifi_exit_module_rkwifi);
/* Historical alternative initcall configurations, kept for reference. */
7731 //#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7732 //#if defined(CONFIG_DEFERRED_INITCALLS)
7733 //deferred_module_init(dhd_module_init);
7734 //#elif defined(USE_LATE_INITCALL_SYNC)
7735 //late_initcall_sync(dhd_module_init);
7737 //late_initcall(dhd_module_init);
7738 //#endif /* USE_LATE_INITCALL_SYNC */
7740 //module_init(dhd_module_init);
7741 //#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7743 //module_exit(dhd_module_exit);
7746 * OS specific functions required to implement DHD driver in OS independent way

/* dhd_os_proto_block() - serialize protocol operations via proto_sem.
 * The perimeter lock is dropped while sleeping on the semaphore to avoid
 * blocking other radio work.
 */
7749 dhd_os_proto_block(dhd_pub_t *pub)
7751 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7754 DHD_PERIM_UNLOCK(pub);
7756 down(&dhd->proto_sem);
7758 DHD_PERIM_LOCK(pub);

/* dhd_os_proto_unblock() - release the protocol serialization semaphore. */
7766 dhd_os_proto_unblock(dhd_pub_t *pub)
7768 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7771 up(&dhd->proto_sem);

/* Accessors for the global ioctl response timeout (milliseconds). */
7779 dhd_os_get_ioctl_resp_timeout(void)
7781 return ((unsigned int)dhd_ioctl_timeout_msec);

7785 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
7787 dhd_ioctl_timeout_msec = (int)timeout_msec;

/*
 * dhd_os_ioctl_resp_wait() - sleep until *condition becomes true or the
 * ioctl timeout expires; returns the remaining jiffies from
 * wait_event_timeout().  Perimeter lock is released across the wait.
 */
7791 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
7793 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7796 /* Convert timeout in millsecond to jiffies */
7797 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7798 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
7800 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
7803 DHD_PERIM_UNLOCK(pub);
7805 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
7807 DHD_PERIM_LOCK(pub);

/* dhd_os_ioctl_resp_wake() - wake any waiter in dhd_os_ioctl_resp_wait(). */
7813 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
7815 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7817 wake_up(&dhd->ioctl_resp_wait);

/* dhd_os_wd_timer_extend() - switch the watchdog between the extended
 * interval and the instance default.
 */
7822 dhd_os_wd_timer_extend(void *bus, bool extend)
7824 dhd_pub_t *pub = bus;
7825 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7828 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
7830 dhd_os_wd_timer(bus, dhd->default_wd_interval);

/*
 * dhd_os_wd_timer() - (re)arm or stop the driver watchdog timer.
 * wdtick == 0 stops the timer; otherwise re-arms it for wdtick ms.
 * wd_timer_valid is flipped under the general lock, but del_timer_sync()
 * runs outside it (it can sleep waiting for a running handler).  The
 * watchdog wakelock tracks whether the timer is armed.
 */
7835 dhd_os_wd_timer(void *bus, uint wdtick)
7837 dhd_pub_t *pub = bus;
7838 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7839 unsigned long flags;
7841 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7844 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
7848 DHD_GENERAL_LOCK(pub, flags);
7850 /* don't start the wd until fw is loaded */
7851 if (pub->busstate == DHD_BUS_DOWN) {
7852 DHD_GENERAL_UNLOCK(pub, flags);
7854 DHD_OS_WD_WAKE_UNLOCK(pub);
7858 /* Totally stop the timer */
7859 if (!wdtick && dhd->wd_timer_valid == TRUE) {
7860 dhd->wd_timer_valid = FALSE;
7861 DHD_GENERAL_UNLOCK(pub, flags);
7862 del_timer_sync(&dhd->timer);
7863 DHD_OS_WD_WAKE_UNLOCK(pub);
7868 DHD_OS_WD_WAKE_LOCK(pub);
7869 dhd_watchdog_ms = (uint)wdtick;
7870 /* Re arm the timer, at last watchdog period */
7871 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
7872 dhd->wd_timer_valid = TRUE;
7874 DHD_GENERAL_UNLOCK(pub, flags);
/* dhd_os_open_image() - open a firmware/nvram file for reading; returns
 * the struct file * (or an error pointer from filp_open).
 */
7878 dhd_os_open_image(char *filename)
7882 fp = filp_open(filename, O_RDONLY, 0);
7884 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7886 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);

/* dhd_os_get_image_block() - read up to len bytes from the opened image.
 * NOTE(review): this uses the pre-4.14 kernel_read(fp, pos, buf, len)
 * argument order; newer kernels changed the signature — confirm against
 * the target kernel version.
 */
7896 dhd_os_get_image_block(char *buf, int len, void *image)
7898 struct file *fp = (struct file *)image;
7904 rdlen = kernel_read(fp, fp->f_pos, buf, len);

/* dhd_os_close_image() - close the file handle opened by dhd_os_open_image(). */
7912 dhd_os_close_image(void *image)
7915 filp_close((struct file *)image, NULL);
/* dhd_os_sdlock()/dhd_os_sdunlock() - serialize SDIO bus access.  When the
 * DPC runs as a real-time thread (dhd_dpc_prio >= 0) a different primitive
 * is used (line hidden here); otherwise a BH spinlock protects the bus.
 */
7919 dhd_os_sdlock(dhd_pub_t *pub)
7923 dhd = (dhd_info_t *)(pub->info);
7925 if (dhd_dpc_prio >= 0)
7928 spin_lock_bh(&dhd->sdlock);

7932 dhd_os_sdunlock(dhd_pub_t *pub)
7936 dhd = (dhd_info_t *)(pub->info);
7938 if (dhd_dpc_prio >= 0)
7941 spin_unlock_bh(&dhd->sdlock);

/* TX-queue lock: protects the transmit queue against BH context. */
7945 dhd_os_sdlock_txq(dhd_pub_t *pub)
7949 dhd = (dhd_info_t *)(pub->info);
7950 spin_lock_bh(&dhd->txqlock);

7954 dhd_os_sdunlock_txq(dhd_pub_t *pub)
7958 dhd = (dhd_info_t *)(pub->info);
7959 spin_unlock_bh(&dhd->txqlock);

/* RX-queue lock hooks are intentionally empty in this build. */
7963 dhd_os_sdlock_rxq(dhd_pub_t *pub)
7968 dhd_os_sdunlock_rxq(dhd_pub_t *pub)

/* RX-frame thread lock. */
7973 dhd_os_rxflock(dhd_pub_t *pub)
7977 dhd = (dhd_info_t *)(pub->info);
7978 spin_lock_bh(&dhd->rxf_lock);

7983 dhd_os_rxfunlock(dhd_pub_t *pub)
7987 dhd = (dhd_info_t *)(pub->info);
7988 spin_unlock_bh(&dhd->rxf_lock);

7991 #ifdef DHDTCPACK_SUPPRESS
/* Lock guarding the TCP-ACK suppression tables. */
7993 dhd_os_tcpacklock(dhd_pub_t *pub)
7997 dhd = (dhd_info_t *)(pub->info);
7998 spin_lock_bh(&dhd->tcpack_lock);

8003 dhd_os_tcpackunlock(dhd_pub_t *pub)
8007 dhd = (dhd_info_t *)(pub->info);
8008 spin_unlock_bh(&dhd->tcpack_lock);
8010 #endif /* DHDTCPACK_SUPPRESS */

/*
 * dhd_os_prealloc() - fetch a buffer from the platform preallocation pool;
 * optionally falls back to kmalloc (GFP chosen by sleepability) on miss.
 */
8012 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
8015 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
8017 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
8019 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
8020 " size: %dbytes\n", __FUNCTION__, section, size));
8021 if (kmalloc_if_fail)
8022 buf = kmalloc(size, flags);

/* dhd_os_prefree() - counterpart of dhd_os_prealloc(); body not visible here. */
8028 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
8032 #if defined(WL_WIRELESS_EXT)
/*
 * dhd_get_wireless_stats() - Wireless Extensions statistics callback;
 * refreshes and returns the cached iw_statistics for this device.
 */
8033 struct iw_statistics *
8034 dhd_get_wireless_stats(struct net_device *dev)
8037 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8043 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
8046 return &dhd->iw.wstats;
8050 #endif /* defined(WL_WIRELESS_EXT) */
8052 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
/*
 * dhd_wlanaudio_event() - maintain a small per-MAC blacklist driven by
 * firmware events.  Repeated failures within a jiffies window promote an
 * entry to blacklisted; stale or (de)associating entries are cleared.
 * The exact thresholds (cnt 10/15) and time windows are driver tuning
 * values — semantics beyond what is visible here should be confirmed
 * against the full source.
 */
8054 dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
8055 wl_event_msg_t *event, void **data)
8058 char eabuf[ETHER_ADDR_STR_LEN];
8059 struct ether_addr *addr = &event->addr;
8060 uint32 type = ntoh32_ua((void *)&event->event_type);
8065 bcm_ether_ntoa(addr, eabuf);
8067 return (BCME_ERROR);
/* First pass: find an existing (or fresh) slot for this MAC. */
8069 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8070 if (dhd->wlanaudio_blist[cnt].is_blacklist)
8073 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
8074 addr, ETHER_ADDR_LEN)) {
8075 /* Mac address is Same */
8076 dhd->wlanaudio_blist[cnt].cnt++;
8078 if (dhd->wlanaudio_blist[cnt].cnt < 15) {
8079 /* black list is false */
8080 if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
8081 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
8083 dhd->wlanaudio_blist[cnt].is_blacklist = true;
8084 dhd->is_wlanaudio_blist = true;
8087 if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
8088 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
/* Entry expired without being blacklisted: recycle the slot. */
8091 bzero(&dhd->wlanaudio_blist[cnt],
8092 sizeof(struct wlanaudio_blacklist));
8096 } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
8097 (!dhd->wlanaudio_blist[cnt].cnt)) {
8099 (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
8101 dhd->wlanaudio_blist[cnt].cnt++;
8102 dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;
8104 bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
/* Association/authentication events clear any blacklist entry for the MAC. */
8110 case WLC_E_AUTH_IND :
8112 case WLC_E_DEAUTH_IND :
8114 case WLC_E_ASSOC_IND:
8116 case WLC_E_REASSOC_IND:
8117 case WLC_E_DISASSOC:
8118 case WLC_E_DISASSOC_IND:
8123 bcm_ether_ntoa(addr, eabuf);
8125 return (BCME_ERROR);
8127 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8128 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
8129 addr, ETHER_ADDR_LEN)) {
8130 /* Mac address is Same */
8131 if (dhd->wlanaudio_blist[cnt].is_blacklist) {
8132 /* black list is true */
8133 bzero(&dhd->wlanaudio_blist[cnt],
8134 sizeof(struct wlanaudio_blacklist));
/* Recompute the global "any blacklisted" flag after clearing. */
8139 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
8140 if (dhd->wlanaudio_blist[cnt].is_blacklist)
8146 dhd->is_wlanaudio_blist = false;
8154 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/*
 * dhd_wl_host_event() - central dispatch for firmware events: optional
 * WLANAUDIO pre-filter, common wl_host_event() parsing (with log-trace
 * format data when SHOW_LOGTRACE), then delivery to Wireless Extensions
 * (primary bsscfg only) and/or cfg80211.
 */
8156 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
8157 wl_event_msg_t *event, void **data)
8161 ASSERT(dhd != NULL);
8163 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
8164 bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);
8166 if (bcmerror != BCME_OK)
8168 #endif /* CUSTOMER_HW20 && WLANAUDIO */
8170 #ifdef SHOW_LOGTRACE
8171 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
8173 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
8174 #endif /* SHOW_LOGTRACE */
8176 if (bcmerror != BCME_OK)
8179 #if defined(WL_WIRELESS_EXT)
8180 if (event->bsscfgidx == 0) {
8182 * Wireless ext is on primary interface only
8185 ASSERT(dhd->iflist[*ifidx] != NULL);
8186 ASSERT(dhd->iflist[*ifidx]->net != NULL);
8188 if (dhd->iflist[*ifidx]->net) {
8189 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
8192 #endif /* defined(WL_WIRELESS_EXT) */
8195 ASSERT(dhd->iflist[*ifidx] != NULL);
8196 ASSERT(dhd->iflist[*ifidx]->net != NULL);
8197 if (dhd->iflist[*ifidx]->net)
8198 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
8199 #endif /* defined(WL_CFG80211) */
8204 /* send up locally generated event */
/*
 * dhd_sendup_event() - inject a locally generated firmware-style event into
 * the network stack.  For WLC_E_BTA_HCI_EVENT (BT-AMP), builds a complete
 * bcm_event_t frame (Ethernet header + BRCM vendor header + event + HCI
 * payload + two NUL terminator bytes) and delivers it via netif, choosing
 * the interrupt-safe receive path when in_interrupt().
 */
8206 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
8208 switch (ntoh32(event->event_type)) {
8210 /* Send up locally generated AMP HCI Events */
8211 case WLC_E_BTA_HCI_EVENT: {
8212 struct sk_buff *p, *skb;
8214 wl_event_msg_t *p_bcm_event;
8223 len = ntoh32(event->datalen);
/* +2: trailing 0x00 0x00 terminator bytes (see comment near the end). */
8224 pktlen = sizeof(bcm_event_t) + len + 2;
8226 ifidx = dhd_ifname2idx(dhd, event->ifname);
8228 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8229 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8231 msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
/* Ethernet header: our MAC as dst, toggled-local MAC as src. */
8233 bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
8234 bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
8235 ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
8237 msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
8239 /* BCM Vendor specific header... */
8240 msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
8241 msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
8242 bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
8244 /* vendor spec header length + pvt data length (private indication
8245 * hdr + actual message itself)
8247 msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
8248 BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
8249 msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
8251 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8253 /* copy wl_event_msg_t into sk_buf */
8255 /* pointer to wl_event_msg_t in sk_buf */
8256 p_bcm_event = &msg->event;
8257 bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
8259 /* copy hci event into sk_buf */
8260 bcopy(data, (p_bcm_event + 1), len);
8262 msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
8263 ntoh16(msg->bcm_hdr.length));
8264 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8266 ptr = (char *)(msg + 1);
8267 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
8268 * are no ethertypes which are following this
8273 skb = PKTTONATIVE(dhdp->osh, p);
/* Fall back to the primary interface if the event ifname is unknown. */
8277 ifp = dhd->iflist[ifidx];
8279 ifp = dhd->iflist[0];
8282 skb->dev = ifp->net;
8283 skb->protocol = eth_type_trans(skb, skb->dev);
8288 /* Strip header, count, deliver upward */
8289 skb_pull(skb, ETH_HLEN);
8291 /* Send the packet */
8292 if (in_interrupt()) {
8299 /* Could not allocate a sk_buf */
8300 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
8303 } /* case WLC_E_BTA_HCI_EVENT */
8304 #endif /* WLBTAMP */
8311 #ifdef LOG_INTO_TCPDUMP
/*
 * dhd_sendup_log() - wrap an arbitrary log buffer in a BRCM-ethertype
 * Ethernet frame and push it up the stack so it appears in tcpdump
 * captures.  Delivered on "wlan0" (falling back to interface 0).
 */
8313 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
8315 struct sk_buff *p, *skb;
8322 struct ether_header eth;
8324 pktlen = sizeof(eth) + data_len;
8327 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8328 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8330 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
8331 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
8332 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
8333 eth.ether_type = hton16(ETHER_TYPE_BRCM);
8335 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
8336 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
8337 skb = PKTTONATIVE(dhdp->osh, p);
/* Save data pointer: eth_type_trans() advances it, restore afterwards. */
8338 skb_data = skb->data;
8341 ifidx = dhd_ifname2idx(dhd, "wlan0");
8342 ifp = dhd->iflist[ifidx];
8344 ifp = dhd->iflist[0];
8347 skb->dev = ifp->net;
8348 skb->protocol = eth_type_trans(skb, skb->dev);
8349 skb->data = skb_data;
8352 /* Strip header, count, deliver upward */
8353 skb_pull(skb, ETH_HLEN);
8355 /* Send the packet */
8356 if (in_interrupt()) {
8363 /* Could not allocate a sk_buf */
8364 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
8367 #endif /* LOG_INTO_TCPDUMP */
/*
 * dhd_wait_for_event() - block (SDIO builds only) until *lockvar clears or
 * the ioctl timeout elapses; the SD lock is released around the wait so the
 * bus can make progress.
 */
8369 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
8371 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8372 struct dhd_info *dhdinfo = dhd->info;
8374 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8375 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
8377 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
8378 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8380 dhd_os_sdunlock(dhd);
8381 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
8383 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */

/* dhd_wait_event_wakeup() - wake any waiter in dhd_wait_for_event(). */
8387 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
8389 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8390 struct dhd_info *dhdinfo = dhd->info;
8391 if (waitqueue_active(&dhdinfo->ctrl_wait))
8392 wake_up(&dhdinfo->ctrl_wait);
8397 #if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * dhd_net_bus_devreset() - reset (flag=TRUE: take down, FALSE: bring up)
 * the dongle.  Before a down-reset, issues WLC_DOWN and tears down wlfc
 * and PNO state; before an up-reset, refreshes firmware/nvram paths on
 * the bus layer.
 */
8399 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
8402 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8405 /* Issue wl down command before resetting the chip */
8406 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
8407 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
8409 #ifdef PROP_TXSTATUS
8410 if (dhd->pub.wlfc_enabled)
8411 dhd_wlfc_deinit(&dhd->pub);
8412 #endif /* PROP_TXSTATUS */
8414 if (dhd->pub.pno_state)
8415 dhd_pno_deinit(&dhd->pub);
8421 dhd_update_fw_nv_path(dhd);
8422 /* update firmware and nvram path to sdio bus */
8423 dhd_bus_update_fw_nv_path(dhd->pub.bus,
8424 dhd->fw_path, dhd->nv_path, dhd->conf_path);
8426 #endif /* BCMSDIO */
8428 ret = dhd_bus_devreset(&dhd->pub, flag);
8430 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));

/* Thin wrappers exposing bus suspend/resume on the net_device. */
8439 dhd_net_bus_suspend(struct net_device *dev)
8441 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8442 return dhd_bus_suspend(&dhd->pub);

8446 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
8448 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8449 return dhd_bus_resume(&dhd->pub, stage);
8452 #endif /* BCMSDIO */
8453 #endif /* BCMSDIO || BCMPCIE */
/* net_os_set_suspend_disable() - swap in a new suspend-disable flag,
 * returning the previous value.
 */
8455 int net_os_set_suspend_disable(struct net_device *dev, int val)
8457 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8461 ret = dhd->pub.suspend_disable_flag;
8462 dhd->pub.suspend_disable_flag = val;

/* net_os_set_suspend() - drive the suspend/resume policy helper; with
 * early-suspend builds it calls dhd_set_suspend() directly, and updates
 * cfg80211 power mode afterwards.
 */
8467 int net_os_set_suspend(struct net_device *dev, int val, int force)
8470 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8473 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8474 ret = dhd_set_suspend(val, &dhd->pub);
8476 ret = dhd_suspend_resume_helper(dhd, val, force);
8479 wl_cfg80211_update_power_mode(dev);

/* Store the beacon listen interval (DTIM multiplier) used while suspended. */
8485 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
8487 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8490 dhd->pub.suspend_bcn_li_dtim = val;

8495 #ifdef PKT_FILTER_SUPPORT
/*
 * net_os_rxfilter_add_remove() - install or remove one of the predefined
 * broadcast/multicast packet filters.  In non-master mode the sense of
 * add/remove is inverted.  Unicast and mDNS filter slots are rejected.
 */
8496 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
8498 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8499 char *filterp = NULL;
8503 if (!dhd_master_mode)
8504 add_remove = !add_remove;
8506 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
8507 (num == DHD_MDNS_FILTER_NUM))
8509 if (num >= dhd->pub.pktfilter_count)
8512 case DHD_BROADCAST_FILTER_NUM:
8513 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
8516 case DHD_MULTICAST4_FILTER_NUM:
8517 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
8520 case DHD_MULTICAST6_FILTER_NUM:
8521 filterp = "103 0 0 0 0xFFFF 0x3333";
8530 dhd->pub.pktfilter[num] = filterp;
8531 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
8532 } else { /* Delete filter */
8533 if (dhd->pub.pktfilter[num] != NULL) {
8534 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
8535 dhd->pub.pktfilter[num] = NULL;

8541 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
8546 /* Packet filtering is set only if we still in early-suspend and
8547 * we need either to turn it ON or turn it OFF
8548 * We can always turn it OFF in case of early-suspend, but we turn it
8549 * back ON only if suspend_disable_flag was not set
8551 if (dhdp && dhdp->up) {
8552 if (dhdp->in_suspend) {
8553 if (!val || (val && !dhdp->suspend_disable_flag))
8554 dhd_enable_packet_filter(val, dhdp);

8560 /* function to enable/disable packet for Network device */
8561 int net_os_enable_packet_filter(struct net_device *dev, int val)
8563 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8565 return dhd_os_enable_packet_filter(&dhd->pub, val);
8567 #endif /* PKT_FILTER_SUPPORT */

/* dhd_dev_init_ioctl() - post-up synchronization of settings with the dongle. */
8570 dhd_dev_init_ioctl(struct net_device *dev)
8572 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8575 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
/* Thin Linux wrappers mapping net_device-based PNO (preferred network
 * offload) entry points onto the OS-independent dhd_pno_* implementation.
 */
8583 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
8585 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
8587 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8589 return (dhd_pno_stop_for_ssid(&dhd->pub));
8591 /* Linux wrapper to call common dhd_pno_set_for_ssid */
8593 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
8594 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
8596 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8598 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
8599 pno_repeat, pno_freq_expo_max, channel_list, nchan));
8602 /* Linux wrapper to call common dhd_pno_enable */
8604 dhd_dev_pno_enable(struct net_device *dev, int enable)
8606 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8608 return (dhd_pno_enable(&dhd->pub, enable));
8611 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
8613 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
8614 struct dhd_pno_hotlist_params *hotlist_params)
8616 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8617 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
8619 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
8621 dhd_dev_pno_stop_for_batch(struct net_device *dev)
8623 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8624 return (dhd_pno_stop_for_batch(&dhd->pub));
8626 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
8628 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
8630 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8631 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
8633 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
8635 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
8637 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8638 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
8640 #endif /* PNO_SUPPORT */
8642 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
/*
 * dhd_hang_process() - deferred-work handler that reports a firmware hang
 * to user space, via a WEXT private event and/or a cfg80211 disconnect.
 */
8643 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
8646 struct net_device *dev;
8648 dhd = (dhd_info_t *)dhd_info;
8649 dev = dhd->iflist[0]->net;
8655 #if defined(WL_WIRELESS_EXT)
8656 wl_iw_send_priv_event(dev, "HANG");
8658 #if defined(WL_CFG80211)
8659 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);

/* dhd_os_send_hang_message() - queue a one-shot high-priority hang report;
 * hang_was_sent guards against duplicate notifications.
 */
8665 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
8669 if (!dhdp->hang_was_sent) {
8670 dhdp->hang_was_sent = 1;
8671 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
8672 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);

/* net_os_send_hang_message() - net_device-facing hang entry; when reporting
 * is disabled the hang is swallowed but the bus is forced DOWN to stop
 * further traffic.
 */
8678 int net_os_send_hang_message(struct net_device *dev)
8680 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8684 /* Report FW problem when enabled */
8685 if (dhd->pub.hang_report) {
8686 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8687 ret = dhd_os_send_hang_message(&dhd->pub);
8689 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8692 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
8694 /* Enforce bus down to stop any future traffic */
8695 dhd->pub.busstate = DHD_BUS_DOWN;
8700 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
/* Platform power control exposed on the net_device. */
8703 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
8705 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8706 return wifi_platform_set_power(dhd->adapter, on, delay_msec);

/* Translate an ISO country code into the platform's customized wl_country_t. */
8709 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
8710 wl_country_t *cspec)
8712 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8713 get_customized_country_code(dhd->adapter, country_iso_code, cspec);

/* Apply a country spec to a running interface and refresh wiphy bands. */
8715 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
8717 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8718 if (dhd && dhd->pub.up) {
8719 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
8721 wl_update_wiphybands(NULL, notify);

/* Apply a band selection to a running interface and refresh wiphy bands. */
8726 void dhd_bus_band_set(struct net_device *dev, uint band)
8728 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8729 if (dhd && dhd->pub.up) {
8731 wl_update_wiphybands(NULL, true);

/*
 * dhd_net_set_fw_path() - override the firmware image path and infer the
 * AP/STA firmware flavor from an "apsta" substring in the name.
 */
8736 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
8738 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8740 if (!fw || fw[0] == '\0')
8743 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
8744 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
8747 if (strstr(fw, "apsta") != NULL) {
8748 DHD_INFO(("GOT APSTA FIRMWARE\n"));
8749 ap_fw_loaded = TRUE;
8751 DHD_INFO(("GOT STA FIRMWARE\n"));
8752 ap_fw_loaded = FALSE;
8758 void dhd_net_if_lock(struct net_device *dev)
8760 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8761 dhd_net_if_lock_local(dhd);
8764 void dhd_net_if_unlock(struct net_device *dev)
8766 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8767 dhd_net_if_unlock_local(dhd);
8770 static void dhd_net_if_lock_local(dhd_info_t *dhd)
8772 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8774 mutex_lock(&dhd->dhd_net_if_mutex);
8778 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
8780 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8782 mutex_unlock(&dhd->dhd_net_if_mutex);
/* Serialize suspend/resume processing against IOCTL paths.
 * No-ops on kernels < 2.6.25 (mutex guarded by version check). */
8786 static void dhd_suspend_lock(dhd_pub_t *pub)
8788 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8789 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8791 mutex_lock(&dhd->dhd_suspend_mutex);
8795 static void dhd_suspend_unlock(dhd_pub_t *pub)
8797 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8798 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8800 mutex_unlock(&dhd->dhd_suspend_mutex);
/* IRQ-safe acquire of the driver-wide dhd_lock; returns the saved IRQ flags
 * which the caller must hand back to dhd_os_general_spin_unlock(). */
8804 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
8806 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8807 unsigned long flags = 0;
8810 spin_lock_irqsave(&dhd->dhd_lock, flags);
8815 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
8817 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8820 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
8823 /* Linux specific multipurpose spinlock API */
/* Allocate and initialize an anonymous spinlock from the OSL heap.
 * NOTE(review): sampled source shows spin_lock_init() called without a
 * visible NULL check on the MALLOC result — confirm the elided lines guard
 * allocation failure before use. */
8825 dhd_os_spin_lock_init(osl_t *osh)
8827 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8828 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8829 /* and this results in kernel asserts in internal builds */
8830 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
8832 spin_lock_init(lock);
8833 return ((void *)lock);
/* Release a lock created by dhd_os_spin_lock_init (size must match alloc). */
8836 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
8838 MFREE(osh, lock, sizeof(spinlock_t) + 4);
/* IRQ-safe lock/unlock on an opaque lock handle; flags round-trip through
 * the caller exactly as with the general spinlock above. */
8841 dhd_os_spin_lock(void *lock)
8843 unsigned long flags = 0;
8846 spin_lock_irqsave((spinlock_t *)lock, flags);
8851 dhd_os_spin_unlock(void *lock, unsigned long flags)
8854 spin_unlock_irqrestore((spinlock_t *)lock, flags);
/* Atomic read of the count of 802.1X (EAPOL) frames still in flight. */
8858 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
8860 return (atomic_read(&dhd->pend_8021x_cnt));
8863 #define MAX_WAIT_FOR_8021X_TX 100
/* Poll (up to MAX_WAIT_FOR_8021X_TX * 10ms) for pending EAPOL TX frames to
 * drain; the perimeter lock is dropped around each sleep so the TX path can
 * make progress. On timeout the counter is forcibly zeroed and an error is
 * logged (so subsequent waits do not inherit a stuck count). */
8866 dhd_wait_pend8021x(struct net_device *dev)
8868 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8869 int timeout = msecs_to_jiffies(10);
8870 int ntimes = MAX_WAIT_FOR_8021X_TX;
8871 int pend = dhd_get_pend_8021x_cnt(dhd);
8873 while (ntimes && pend) {
8875 set_current_state(TASK_INTERRUPTIBLE);
8876 DHD_PERIM_UNLOCK(&dhd->pub);
8877 schedule_timeout(timeout);
8878 DHD_PERIM_LOCK(&dhd->pub);
8879 set_current_state(TASK_RUNNING);
8882 pend = dhd_get_pend_8021x_cnt(dhd);
/* timeout path: reset the counter so it cannot wedge future waits */
8886 atomic_set(&dhd->pend_8021x_cnt, 0);
8887 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
/* Debug-only: dump 'size' bytes of 'buf' to /tmp/mem_dump and free the
 * buffer. Uses the legacy set_fs(KERNEL_DS) pattern (the set_fs/get_fs calls
 * are elided by sampling) to allow a kernel buffer through the file API.
 * NOTE(review): the f_op->write return value is ignored — a short or failed
 * write is silent; also ->write was removed in modern kernels (use
 * kernel_write) — confirm target kernel version. */
8894 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
8898 mm_segment_t old_fs;
8901 /* change to KERNEL_DS address limit */
8905 /* open file to write */
8906 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
8908 printf("%s: open file error\n", __FUNCTION__);
8913 /* Write buf to file */
8914 fp->f_op->write(fp, buf, size, &pos);
8917 /* free buf before return */
8918 MFREE(dhd->osh, buf, size);
8919 /* close file before return */
8921 filp_close(fp, current->files);
8922 /* restore previous address limit */
8927 #endif /* DHD_DEBUG */
/* Arm the rx/ctrl timed wakelocks with whatever timeout values were staged
 * by the *_timeout_enable() setters, then clear the staged values. Returns
 * the larger of the two staged timeouts. All state is protected by
 * wakelock_spinlock. */
8929 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
8931 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8932 unsigned long flags;
8936 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8937 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
8938 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
8939 #ifdef CONFIG_HAS_WAKELOCK
8940 if (dhd->wakelock_rx_timeout_enable)
8941 wake_lock_timeout(&dhd->wl_rxwake,
8942 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
8943 if (dhd->wakelock_ctrl_timeout_enable)
8944 wake_lock_timeout(&dhd->wl_ctrlwake,
8945 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
/* staged timeouts are one-shot: consumed here */
8947 dhd->wakelock_rx_timeout_enable = 0;
8948 dhd->wakelock_ctrl_timeout_enable = 0;
8949 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device wrapper for the above. */
8954 int net_os_wake_lock_timeout(struct net_device *dev)
8956 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8960 ret = dhd_os_wake_lock_timeout(&dhd->pub);
/* Stage an RX wakelock timeout (ms); only ever raises the staged value so
 * concurrent requesters get the max. Consumed by dhd_os_wake_lock_timeout. */
8964 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
8966 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8967 unsigned long flags;
8970 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8971 if (val > dhd->wakelock_rx_timeout_enable)
8972 dhd->wakelock_rx_timeout_enable = val;
8973 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Same max-staging policy for the ctrl-path wakelock timeout. */
8978 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
8980 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8981 unsigned long flags;
8984 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8985 if (val > dhd->wakelock_ctrl_timeout_enable)
8986 dhd->wakelock_ctrl_timeout_enable = val;
8987 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Cancel any staged ctrl timeout and drop the ctrl wakelock if held. */
8992 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
8994 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8995 unsigned long flags;
8998 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8999 dhd->wakelock_ctrl_timeout_enable = 0;
9000 #ifdef CONFIG_HAS_WAKELOCK
9001 if (wake_lock_active(&dhd->wl_ctrlwake))
9002 wake_unlock(&dhd->wl_ctrlwake);
9004 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device wrappers for the two enable calls. */
9009 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
9011 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9015 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
9019 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
9021 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9025 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
/* Reference-counted wakelock acquire: the underlying lock (wakelock or
 * PM stay-awake, depending on kernel/config) is taken only on the 0->1
 * transition, and skipped entirely while waive_wakelock is set (see
 * dhd_os_wake_lock_waive). Returns the new reference count. */
9029 int dhd_os_wake_lock(dhd_pub_t *pub)
9031 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9032 unsigned long flags;
9036 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9038 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9039 #ifdef CONFIG_HAS_WAKELOCK
9040 wake_lock(&dhd->wl_wifi);
9041 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9042 dhd_bus_dev_pm_stay_awake(pub);
9045 dhd->wakelock_counter++;
9046 ret = dhd->wakelock_counter;
9047 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device wrapper. */
9052 int net_os_wake_lock(struct net_device *dev)
9054 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9058 ret = dhd_os_wake_lock(&dhd->pub);
/* Reference-counted wakelock release: first flushes any staged timed
 * wakelocks (dhd_os_wake_lock_timeout), then drops the counter and releases
 * the real lock on the 1->0 transition unless waived. Returns the new
 * reference count. */
9062 int dhd_os_wake_unlock(dhd_pub_t *pub)
9064 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9065 unsigned long flags;
9068 dhd_os_wake_lock_timeout(pub);
9070 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9071 if (dhd->wakelock_counter > 0) {
9072 dhd->wakelock_counter--;
9073 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9074 #ifdef CONFIG_HAS_WAKELOCK
9075 wake_unlock(&dhd->wl_wifi);
9076 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9077 dhd_bus_dev_pm_relax(pub);
9080 ret = dhd->wakelock_counter;
9082 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Report whether the main or watchdog wakelock is held, so the SD host can
 * veto suspend. Compiles to a stub when neither wakelock support nor the
 * SDIO PM path is configured. */
9087 int dhd_os_check_wakelock(dhd_pub_t *pub)
9089 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9090 KERNEL_VERSION(2, 6, 36)))
9095 dhd = (dhd_info_t *)(pub->info);
9096 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9098 #ifdef CONFIG_HAS_WAKELOCK
9099 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9100 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
9101 (wake_lock_active(&dhd->wl_wdwake))))
9103 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9104 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
/* Stricter variant: also counts the rx and ctrl timed wakelocks. */
9110 int dhd_os_check_wakelock_all(dhd_pub_t *pub)
9112 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9113 KERNEL_VERSION(2, 6, 36)))
9118 dhd = (dhd_info_t *)(pub->info);
9119 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9121 #ifdef CONFIG_HAS_WAKELOCK
9122 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9123 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
9124 wake_lock_active(&dhd->wl_wdwake) ||
9125 wake_lock_active(&dhd->wl_rxwake) ||
9126 wake_lock_active(&dhd->wl_ctrlwake))) {
9129 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9130 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
/* net_device wrapper for dhd_os_wake_unlock. */
9136 int net_os_wake_unlock(struct net_device *dev)
9138 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9142 ret = dhd_os_wake_unlock(&dhd->pub);
/* Watchdog wakelock acquire: counted, but the real lock is taken only on
 * the first acquire. Returns the new count. */
9146 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
9148 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9149 unsigned long flags;
9153 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9154 #ifdef CONFIG_HAS_WAKELOCK
9155 /* if wakelock_wd_counter was never used : lock it at once */
9156 if (!dhd->wakelock_wd_counter)
9157 wake_lock(&dhd->wl_wdwake);
9159 dhd->wakelock_wd_counter++;
9160 ret = dhd->wakelock_wd_counter;
9161 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Watchdog wakelock release: note this is NOT symmetric with the acquire —
 * any release zeroes the counter and drops the lock unconditionally. */
9166 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
9168 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9169 unsigned long flags;
9173 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9174 if (dhd->wakelock_wd_counter) {
9175 dhd->wakelock_wd_counter = 0;
9176 #ifdef CONFIG_HAS_WAKELOCK
9177 wake_unlock(&dhd->wl_wdwake);
9180 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9185 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the PCIe out-of-band host-wake wakelock for 'val' ms so the host
 * stays awake long enough to service the dongle interrupt. */
9186 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
9188 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9192 #ifdef CONFIG_HAS_WAKELOCK
9193 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
/* Early release of the OOB interrupt wakelock once servicing is done. */
9199 int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
9201 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9205 #ifdef CONFIG_HAS_WAKELOCK
9206 /* if wl_intrwake is active, unlock it */
9207 if (wake_lock_active(&dhd->wl_intrwake)) {
9208 wake_unlock(&dhd->wl_intrwake);
9214 #endif /* BCMPCIE_OOB_HOST_WAKE */
9216 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
9217 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
/* While waived, dhd_os_wake_lock/unlock still count references but never
 * touch the real wakelock; the pre-waive count is snapshotted so restore can
 * reconcile.
 * NOTE(review): the return is wakelock_wd_counter (watchdog), yet the header
 * comment says "current wakelock counter" — looks like a copy/paste of the
 * wd variant; confirm whether wakelock_counter was intended. */
9219 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
9221 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9222 unsigned long flags;
9226 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9227 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9228 if (dhd->waive_wakelock == FALSE) {
9229 /* record current lock status */
9230 dhd->wakelock_before_waive = dhd->wakelock_counter;
9231 dhd->waive_wakelock = TRUE;
9233 ret = dhd->wakelock_wd_counter;
9234 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* End a waive window started by dhd_os_wake_lock_waive and reconcile the
 * real wakelock with whatever net lock/unlock activity happened while
 * waived (take the lock if refs went 0->N, drop it if N->0).
 * NOTE(review): as in _waive, the return value is the watchdog counter —
 * confirm against the wd variant. */
9239 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
9241 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9242 unsigned long flags;
9248 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9249 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9250 if (!dhd->waive_wakelock)
9253 dhd->waive_wakelock = FALSE;
9254 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
9255 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
9256 * the lock in between, do the same by calling wake_unlock or pm_relax
9258 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
9259 #ifdef CONFIG_HAS_WAKELOCK
9260 wake_lock(&dhd->wl_wifi);
9261 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9262 dhd_bus_dev_pm_stay_awake(&dhd->pub);
9264 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
9265 #ifdef CONFIG_HAS_WAKELOCK
9266 wake_unlock(&dhd->wl_wifi);
9267 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9268 dhd_bus_dev_pm_relax(&dhd->pub);
9271 dhd->wakelock_before_waive = 0;
9273 ret = dhd->wakelock_wd_counter;
9274 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Driver-up predicate; body elided by sampling (presumably tests pub->up). */
9278 bool dhd_os_check_if_up(dhd_pub_t *pub)
9285 /* function to collect firmware, chip id and chip version info */
/* Builds a human-readable version string (driver + firmware, then chip
 * id/rev/pkg) into the module-level info_string and prints it.
 * NOTE(review): 'i' from the first snprintf is reused as an offset; if the
 * first write truncated, i could exceed sizeof(info_string) — the guard, if
 * any, is in lines elided by sampling. */
9286 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
9290 i = snprintf(info_string, sizeof(info_string),
9291 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
9292 printf("%s\n", info_string);
9297 i = snprintf(&info_string[i], sizeof(info_string) - i,
9298 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
9299 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
/* Locally-originated wl ioctl path: validates the net_device and interface
 * index, then issues the ioctl under wakelock + perimeter lock and runs the
 * hang-detection check on the result. */
9302 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
9306 dhd_info_t *dhd = NULL;
9308 if (!net || !DEV_PRIV(net)) {
9309 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
9313 dhd = DHD_DEV_INFO(net);
9317 ifidx = dhd_net2idx(dhd, net);
9318 if (ifidx == DHD_BAD_IF) {
9319 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
9323 DHD_OS_WAKE_LOCK(&dhd->pub);
9324 DHD_PERIM_LOCK(&dhd->pub);
9326 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
9327 dhd_check_hang(net, &dhd->pub, ret);
9329 DHD_PERIM_UNLOCK(&dhd->pub);
9330 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Map ifidx back to a net_device and run the hang check on 'ret'. */
9335 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
9337 struct net_device *net;
9339 net = dhd_idx2net(dhdp, ifidx);
9341 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
9345 return dhd_check_hang(net, dhdp, ret);
9348 /* Return instance */
/* Adapter unit number for this DHD instance. */
9349 int dhd_get_instance(dhd_pub_t *dhdp)
9351 return dhdp->info->unit;
9355 #ifdef PROP_TXSTATUS
/* Platform hooks for proprietary TX-status flow control; bodies elided by
 * sampling — on generic platforms these are typically empty stubs. */
9357 void dhd_wlfc_plat_init(void *dhd)
9362 void dhd_wlfc_plat_deinit(void *dhd)
/* Whether flow control should be bypassed on this platform. */
9367 bool dhd_wlfc_skip_fc(void)
9371 #endif /* PROP_TXSTATUS */
/* --- BCMDBGFS: debugfs window onto dongle register space ---------------- */
9375 #include <linux/debugfs.h>
9377 extern uint32 dhd_readregl(void *bp, uint32 addr);
9378 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
9380 typedef struct dhd_dbgfs {
9381 struct dentry *debugfs_dir;
9382 struct dentry *debugfs_mem;
/* (remaining members — dhdp and size — elided by sampling) */
9387 dhd_dbgfs_t g_dbgfs;
/* open: stash the inode's private data on the file handle. */
9390 dhd_dbg_state_open(struct inode *inode, struct file *file)
9392 file->private_data = inode->i_private;
/* read: 4-byte aligned register read at the current file position.
 * NOTE(review): only 4 bytes are copied to userspace, yet *ppos advances by
 * the full 'count' — a read larger than 4 skips registers; confirm intended
 * ("it's up to the user" per the comment below). */
9397 dhd_dbg_state_read(struct file *file, char __user *ubuf,
9398 size_t count, loff_t *ppos)
9407 if (pos >= g_dbgfs.size || !count)
9409 if (count > g_dbgfs.size - pos)
9410 count = g_dbgfs.size - pos;
9412 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
9413 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
9415 ret = copy_to_user(ubuf, &tmp, 4);
9420 *ppos = pos + count;
/* write: mirror of read — one aligned 32-bit register write per call. */
9428 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
9436 if (pos >= g_dbgfs.size || !count)
9438 if (count > g_dbgfs.size - pos)
9439 count = g_dbgfs.size - pos;
9441 ret = copy_from_user(&buf, ubuf, sizeof(uint32))
9445 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
9446 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
/* lseek over the emulated register window (SEEK_SET/CUR/END cases are
 * partially elided by sampling). */
9453 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
9462 pos = file->f_pos + off;
9465 pos = g_dbgfs.size - off;
9467 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
9470 static const struct file_operations dhd_dbg_state_ops = {
9471 .read = dhd_dbg_state_read,
9472 .write = dhd_debugfs_write,
9473 .open = dhd_dbg_state_open,
9474 .llseek = dhd_debugfs_lseek
/* Create the "mem" node under the dhd debugfs dir (0644). */
9477 static void dhd_dbg_create(void)
9479 if (g_dbgfs.debugfs_dir) {
9480 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
9481 NULL, &dhd_dbg_state_ops);
/* Init: remember dhdp, expose a 512MB window, create /sys/kernel/debug/dhd. */
9485 void dhd_dbg_init(dhd_pub_t *dhdp)
9489 g_dbgfs.dhdp = dhdp;
9490 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
9492 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
9493 if (IS_ERR(g_dbgfs.debugfs_dir)) {
9494 err = PTR_ERR(g_dbgfs.debugfs_dir);
9495 g_dbgfs.debugfs_dir = NULL;
/* Teardown in reverse order, then scrub the global state. */
9504 void dhd_dbg_remove(void)
9506 debugfs_remove(g_dbgfs.debugfs_mem);
9507 debugfs_remove(g_dbgfs.debugfs_dir);
9509 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
9512 #endif /* ifdef BCMDBGFS */
/* WLMEDIA_HTSF TX path: stamp outgoing test-traffic packets (UDP dest port
 * in [tsport, tsport+20]) with the hardware TSF-derived timestamp and an
 * htsfts_t record at HTSF_HOSTOFFSET. Offsets 40/44/82/84 are fixed byte
 * positions into the eth+IP+UDP frame — assumes untagged IPv4/UDP layout;
 * TODO confirm against the iperf test harness that consumes these. */
9517 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
9519 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
9520 struct sk_buff *skb;
9522 uint16 dport = 0, oldmagic = 0xACAC;
9526 /* timestamp packet */
9528 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
9530 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
9531 /* memcpy(&proto, p1+26, 4); */
9532 memcpy(&dport, p1+40, 2);
9533 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
9534 dport = ntoh16(dport);
9537 /* timestamp only if icmp or udb iperf with port 5555 */
9538 /* if (proto == 17 && dport == tsport) { */
9539 if (dport >= tsport && dport <= tsport + 20) {
9541 skb = (struct sk_buff *) pktbuf;
9543 htsf = dhd_get_htsf(dhd, 0);
9544 memset(skb->data + 44, 0, 2); /* clear checksum */
9545 memcpy(skb->data+82, &oldmagic, 2);
9546 memcpy(skb->data+84, &htsf, 4);
9548 memset(&ts, 0, sizeof(htsfts_t));
9549 ts.magic = HTSFMAGIC;
9550 ts.prio = PKTPRIO(pktbuf);
9551 ts.seqnum = htsf_seqnum++;
9552 ts.c10 = get_cycles();
9554 ts.endmagic = HTSFENDMAGIC;
9556 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
/* Print one latency histogram: per-bin counts, then the stashed max
 * (bin[NUMBIN-2]) and negative-sample count (bin[NUMBIN-1]). */
9560 static void dhd_dump_htsfhisto(histo_t *his, char *s)
9562 int pktcnt = 0, curval = 0, i;
9563 for (i = 0; i < (NUMBIN-2); i++) {
9565 printf("%d ", his->bin[i]);
9566 pktcnt += his->bin[i];
9568 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
9569 his->bin[NUMBIN-1], s);
/* Classify one latency sample into 500-unit-wide bins; negatives go to
 * bin[NUMBIN-1], the running max lives in bin[NUMBIN-2], and overflow
 * samples land in bin[NUMBIN-3]. */
9573 void sorttobin(int value, histo_t *histo)
9578 histo->bin[NUMBIN-1]++;
9581 if (value > histo->bin[NUMBIN-2]) /* store the max value */
9582 histo->bin[NUMBIN-2] = value;
9584 for (i = 0; i < (NUMBIN-2); i++) {
9585 binval += 500; /* 500 us bins */
9586 if (value <= binval) {
9591 histo->bin[NUMBIN-3]++;
/* WLMEDIA_HTSF RX path: complete the four-point (t1..t4) timing record for
 * packets that were stamped on TX (magic 0xACAC at offset 78), feed the
 * three hop deltas plus end-to-end into the vi_d* histograms, and track the
 * worst-case sample. The ts[] ring wraps at TSMAX. */
9595 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
9597 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9598 struct sk_buff *skb;
9601 int d1, d2, d3, end2end;
9605 skb = PKTTONATIVE(dhdp->osh, pktbuf);
9606 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
9608 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
9609 memcpy(&old_magic, p1+78, 2);
9610 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
9615 if (htsf_ts->magic == HTSFMAGIC) {
9616 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
9617 htsf_ts->cE0 = get_cycles();
9620 if (old_magic == 0xACAC) {
9623 htsf = dhd_get_htsf(dhd, 0);
9624 memcpy(skb->data+92, &htsf, sizeof(uint32));
9626 memcpy(&ts[tsidx].t1, skb->data+80, 16);
9628 d1 = ts[tsidx].t2 - ts[tsidx].t1;
9629 d2 = ts[tsidx].t3 - ts[tsidx].t2;
9630 d3 = ts[tsidx].t4 - ts[tsidx].t3;
9631 end2end = ts[tsidx].t4 - ts[tsidx].t1;
9633 sorttobin(d1, &vi_d1);
9634 sorttobin(d2, &vi_d2);
9635 sorttobin(d3, &vi_d3);
9636 sorttobin(end2end, &vi_d4);
9638 if (end2end > 0 && end2end > maxdelay) {
9640 maxdelaypktno = tspktcnt;
9641 memcpy(&maxdelayts, &ts[tsidx], 16);
9643 if (++tsidx >= TSMAX)
/* Extrapolate the current dongle TSF from the last (tsf, cycle-count)
 * sample using the calibrated cycles-per-us coefficient (coef.dec1dec2),
 * plus a fixed bus-propagation fudge (HTSF_BUS_DELAY).
 * NOTE(review): the cycle-counter wrap uses (0xFFFFFFFF - last), which is
 * one short of a true 2^32 wrap — confirm the off-by-one is acceptable. */
9648 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
9650 uint32 htsf = 0, cur_cycle, delta, delta_us;
9651 uint32 factor, baseval, baseval2;
9657 if (cur_cycle > dhd->htsf.last_cycle)
9658 delta = cur_cycle - dhd->htsf.last_cycle;
9660 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
9665 if (dhd->htsf.coef) {
9666 /* times ten to get the first digit */
9667 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
9668 baseval = (delta*10)/factor;
9669 baseval2 = (delta*10)/(factor+1);
/* linear interpolation between 1/factor and 1/(factor+1) by coefdec2 */
9670 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
9671 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
9674 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
/* Dump the whole ts[] timing ring (t1..t4 and per-hop deltas), then the
 * single worst end-to-end sample recorded so far. 'max' tracks the index of
 * the largest t4-t1 seen while scanning. */
9680 static void dhd_dump_latency(void)
9683 int d1, d2, d3, d4, d5;
9685 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
9686 for (i = 0; i < TSMAX; i++) {
9687 d1 = ts[i].t2 - ts[i].t1;
9688 d2 = ts[i].t3 - ts[i].t2;
9689 d3 = ts[i].t4 - ts[i].t3;
9690 d4 = ts[i].t4 - ts[i].t1;
9691 d5 = ts[max].t4-ts[max].t1;
9692 if (d4 > d5 && d4 > 0) {
9695 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
9696 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
9700 printf("current idx = %d \n", tsidx);
9702 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
9703 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
9704 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
9705 maxdelayts.t2 - maxdelayts.t1,
9706 maxdelayts.t3 - maxdelayts.t2,
9707 maxdelayts.t4 - maxdelayts.t3,
9708 maxdelayts.t4 - maxdelayts.t1);
/* Diagnostic: fetch the real "tsf" iovar from the dongle and print it next
 * to two local dhd_get_htsf() extrapolations (s1 before, s2 after the
 * ioctl) so drift of the calibrated coefficient can be eyeballed. */
9713 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
9725 memset(&ioc, 0, sizeof(ioc));
9726 memset(&tsf_buf, 0, sizeof(tsf_buf));
9728 ioc.cmd = WLC_GET_VAR;
9730 ioc.len = (uint)sizeof(buf);
9733 strncpy(buf, "tsf", sizeof(buf) - 1);
9734 buf[sizeof(buf) - 1] = '\0';
9735 s1 = dhd_get_htsf(dhd, 0);
9736 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
9738 DHD_ERROR(("%s: tsf is not supported by device\n",
9739 dhd_ifname(&dhd->pub, ifidx)));
9744 s2 = dhd_get_htsf(dhd, 0);
9746 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
9747 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
9748 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
9749 dhd->htsf.coefdec2, s2-tsf_buf.low);
9750 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
/* Recalibrate the cycles-per-TSF-microsecond coefficient from consecutive
 * (cycle counter, dongle TSF) samples. 'data' points at the new tsf_t from
 * the dongle. Produces coef plus two decimal digits (dec1/dec2) used by
 * dhd_get_htsf(); skips the update on zero TSF or non-monotonic samples. */
9754 void htsf_update(dhd_info_t *dhd, void *data)
9756 static ulong cur_cycle = 0, prev_cycle = 0;
9757 uint32 htsf, tsf_delta = 0;
9758 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
9762 /* cycles_t in include/mips/timex.h */
9766 prev_cycle = cur_cycle;
9769 if (cur_cycle > prev_cycle)
9770 cyc_delta = cur_cycle - prev_cycle;
/* wrap handling (same off-by-one note as dhd_get_htsf applies) */
9774 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
9778 printf(" tsf update ata point er is null \n");
9780 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
9781 memcpy(&cur_tsf, data, sizeof(tsf_t));
9783 if (cur_tsf.low == 0) {
9784 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
9788 if (cur_tsf.low > prev_tsf.low)
9789 tsf_delta = (cur_tsf.low - prev_tsf.low);
9791 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
9792 cur_tsf.low, prev_tsf.low));
9793 if (cur_tsf.high > prev_tsf.high) {
9794 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
9795 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
9798 return; /* do not update */
/* long division by hand: integer part then three decimal digits */
9802 hfactor = cyc_delta / tsf_delta;
9803 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
9804 dec1 = tmp/tsf_delta;
9805 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
9806 tmp = (tmp - (dec1*tsf_delta))*10;
9807 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
9826 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
9827 dhd->htsf.coef = hfactor;
9828 dhd->htsf.last_cycle = cur_cycle;
9829 dhd->htsf.last_tsf = cur_tsf.low;
9830 dhd->htsf.coefdec1 = dec1;
9831 dhd->htsf.coefdec2 = dec2;
9834 htsf = prev_tsf.low;
9838 #endif /* WLMEDIA_HTSF */
9840 #ifdef CUSTOM_SET_CPUCORE
/* Pin (set!=0) or unpin the DPC and RXF kernel threads to their dedicated
 * cores (DPC_CPUCORE / RXF_CPUCORE vs PRIMARY_CPUCORE), retrying while a
 * target CPU is offline, up to MAX_RETRY_SET_CPUCORE attempts each.
 * Only acts when the channel is VHT80 (throughput-critical case). */
9841 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
9843 int e_dpc = 0, e_rxf = 0, retry_set = 0;
9845 if (!(dhd->chan_isvht80)) {
9846 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
9853 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9854 cpumask_of(DPC_CPUCORE));
9856 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9857 cpumask_of(PRIMARY_CPUCORE));
9859 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9860 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
9865 } while (e_dpc < 0);
9870 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9871 cpumask_of(RXF_CPUCORE));
9873 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9874 cpumask_of(PRIMARY_CPUCORE));
9876 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9877 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
9882 } while (e_rxf < 0);
9884 #ifdef DHD_OF_SUPPORT
9885 interrupt_set_cpucore(set);
9886 #endif /* DHD_OF_SUPPORT */
9887 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
9891 #endif /* CUSTOM_SET_CPUCORE */
9892 #if defined(DHD_TCP_WINSIZE_ADJUST)
/* True (per elided return) when 'port' is one of the target_ports[]. */
9893 static int dhd_port_list_match(int port)
9896 for (i = 0; i < MAX_TARGET_PORTS; i++) {
9897 if (target_ports[i] == port)
/* In hostap mode, inflate small advertised TCP windows (below
 * MIN_TCP_WIN_SIZE, on matched destination ports) by WIN_SIZE_SCALE_FACTOR,
 * patching the checksum incrementally (RFC 1624 style folding — the
 * negative-sum decrement compensates for one's-complement borrow;
 * NOTE(review): confirm the folding against RFC 1624, the sampled lines may
 * elide part of it). skb data pointer is restored via skb_push. */
9902 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
9904 struct iphdr *ipheader;
9905 struct tcphdr *tcpheader;
9907 int32 incremental_checksum;
9909 if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
9911 if (skb == NULL || skb->data == NULL)
9914 ipheader = (struct iphdr*)(skb->data);
9916 if (ipheader->protocol == IPPROTO_TCP) {
9917 tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
9919 win_size = ntoh16(tcpheader->window);
9920 if (win_size < MIN_TCP_WIN_SIZE &&
9921 dhd_port_list_match(ntoh16(tcpheader->dest))) {
9922 incremental_checksum = ntoh16(tcpheader->check);
9923 incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
9924 if (incremental_checksum < 0)
9925 --incremental_checksum;
9926 tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
9927 tcpheader->check = hton16((unsigned short)incremental_checksum);
9930 skb_push(skb, (ipheader->ihl)<<2);
9933 #endif /* DHD_TCP_WINSIZE_ADJUST */
9935 /* Get interface specific ap_isolate configuration */
/* Per-interface AP client-isolation flag (blocks STA<->STA forwarding). */
9936 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
9938 dhd_info_t *dhd = dhdp->info;
9941 ASSERT(idx < DHD_MAX_IFS);
9943 ifp = dhd->iflist[idx];
9945 return ifp->ap_isolate;
9948 /* Set interface specific ap_isolate configuration */
9949 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
9951 dhd_info_t *dhd = dhdp->info;
9954 ASSERT(idx < DHD_MAX_IFS);
9956 ifp = dhd->iflist[idx];
9958 ifp->ap_isolate = val;
9964 /* Returns interface specific WMF configuration */
/* Look up the wireless-multicast-forwarding state for interface 'idx'
 * (the &ifp->wmf return itself is elided by sampling). */
9965 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
9967 dhd_info_t *dhd = dhdp->info;
9970 ASSERT(idx < DHD_MAX_IFS);
9972 ifp = dhd->iflist[idx];
9975 #endif /* DHD_WMF */
9978 #ifdef DHD_UNICAST_DHCP
/* Parse the L2 header of 'pktbuf': handles Ethernet II, 802.3+SNAP, and a
 * single 802.1Q VLAN tag. On success fills *data_ptr/*len_ptr with the L3
 * payload, *et_ptr with the ethertype, *snap_ptr with SNAP-ness; non-zero
 * return (elided) indicates a short/unrecognized frame. */
9980 dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
9981 uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
9983 uint8 *frame = PKTDATA(pub->osh, pktbuf);
9984 int length = PKTLEN(pub->osh, pktbuf);
9985 uint8 *pt; /* Pointer to type field */
9988 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
9989 if (length < ETHER_HDR_LEN) {
9990 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
9991 __FUNCTION__, length));
9993 } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
9994 /* Frame is Ethernet II */
9995 pt = frame + ETHER_TYPE_OFFSET;
9996 } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
9997 !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
9998 pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
10001 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
10006 ethertype = ntoh16_ua(pt);
10008 /* Skip VLAN tag, if any */
10009 if (ethertype == ETHER_TYPE_8021Q) {
10010 pt += VLAN_TAG_LEN;
10012 if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
10013 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
10014 __FUNCTION__, length));
10018 ethertype = ntoh16_ua(pt);
10021 *data_ptr = pt + ETHER_TYPE_LEN;
10022 *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
10023 *et_ptr = ethertype;
/* Parse the IPv4 header after dhd_get_pkt_ether_type: validates version,
 * header length, total length, and rejects fragments. On success fills
 * *data_ptr/*len_ptr with the L4 payload and *prot_ptr with the protocol. */
10029 dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
10030 uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
10032 struct ipv4_hdr *iph; /* IP frame pointer */
10033 int iplen; /* IP frame length */
10034 uint16 ethertype, iphdrlen, ippktlen;
/* NOTE(review): "ðertype" below is extraction mojibake for "&ethertype"
 * (the "&eth" was eaten as an HTML entity) — restore on merge. */
10039 if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
10040 &iplen, ðertype, &snap) != 0)
10043 if (ethertype != ETHER_TYPE_IP) {
10047 /* We support IPv4 only */
10048 if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
10052 /* Header length sanity */
10053 iphdrlen = IPV4_HLEN(iph);
10056 * Packet length sanity; sometimes we receive eth-frame size bigger
10057 * than the IP content, which results in a bad tcp chksum
10059 ippktlen = ntoh16(iph->tot_len);
10060 if (ippktlen < iplen) {
10062 DHD_INFO(("%s: extra frame length ignored\n",
10065 } else if (ippktlen > iplen) {
10066 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
10067 __FUNCTION__, ippktlen - iplen));
10071 if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
10072 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
10073 __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
10078 * We don't handle fragmented IP packets. A first frag is indicated by the MF
10079 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
10081 iph_frag = ntoh16(iph->frag);
10083 if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
10084 DHD_INFO(("DHD:%s: IP fragment not handled\n",
10089 prot = IPV4_PROT(iph);
10091 *data_ptr = (((uint8 *)iph) + iphdrlen);
10092 *len_ptr = iplen - iphdrlen;
10097 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
/* Hostap optimization: for multicast/broadcast DHCP server->client replies
 * (offer/ack) destined to a known associated STA, rewrite the Ethernet
 * destination to the client's hardware address (chaddr) so the frame is
 * delivered unicast instead of flooded. */
10099 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
10101 dhd_sta_t* stainfo;
10102 uint8 *eh = PKTDATA(pub->osh, pktbuf);
10111 if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
10113 if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
10115 if (prot != IP_PROT_UDP)
10117 /* check frame length, at least UDP_HDR_LEN */
10118 if (udpl < UDP_HDR_LEN) {
10119 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
10123 port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
10124 /* only process DHCP packets from server to client */
10125 if (port != DHCP_PORT_CLIENT)
10128 dhcp = udph + UDP_HDR_LEN;
10129 dhcpl = udpl - UDP_HDR_LEN;
10131 if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
10132 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
10136 /* only process DHCP reply(offer/ack) packets */
10137 if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
10139 chaddr = dhcp + DHCP_CHADDR_OFFSET;
/* rewrite only when the client is a known associated station */
10140 stainfo = dhd_find_sta(pub, ifidx, chaddr);
10142 bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
10147 #endif /* DHD_UNICAST_DHCP */
10148 #ifdef DHD_L2_FILTER
10149 /* Check if packet type is ICMP ECHO */
/* L2 filter helper: parse the packet's IP layer and report whether it is an
 * ICMP echo request (the positive-return line is elided by sampling). */
10151 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
10153 struct bcmicmp_hdr *icmph;
10157 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
10159 if (prot == IP_PROT_ICMP) {
10160 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
10165 #endif /* DHD_L2_FILTER */
10167 #ifdef SET_RPS_CPUS
/* Install a custom Receive-Packet-Steering CPU map on an RX queue: parse
 * the hex cpumask string, build an rps_map, swap it in RCU-safely under a
 * local spinlock, bump/drop the rps_needed static key, and kfree_rcu the
 * old map. Mirrors the sysfs store_rps_map logic. */
10168 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
10170 struct rps_map *old_map, *map;
10171 cpumask_var_t mask;
10173 static DEFINE_SPINLOCK(rps_map_lock);
10175 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10177 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
10178 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
10182 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
10184 free_cpumask_var(mask);
10185 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
/* map sized for the weight of the mask, min one cacheline */
10189 map = kzalloc(max_t(unsigned int,
10190 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
10193 free_cpumask_var(mask);
10194 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
10199 for_each_cpu(cpu, mask)
10200 map->cpus[i++] = cpu;
10206 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
10210 spin_lock(&rps_map_lock);
10211 old_map = rcu_dereference_protected(queue->rps_map,
10212 lockdep_is_held(&rps_map_lock));
10213 rcu_assign_pointer(queue->rps_map, map);
10214 spin_unlock(&rps_map_lock);
10217 static_key_slow_inc(&rps_needed);
10219 kfree_rcu(old_map, rcu);
10220 static_key_slow_dec(&rps_needed);
10222 free_cpumask_var(mask);
10224 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
/* Detach and RCU-free the queue's rps_map, if any. */
10228 void custom_rps_map_clear(struct netdev_rx_queue *queue)
10230 struct rps_map *map;
10232 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10234 map = rcu_dereference_protected(queue->rps_map, 1);
10236 RCU_INIT_POINTER(queue->rps_map, NULL);
10237 kfree_rcu(map, rcu);
10238 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
10241 #endif /* SET_RPS_CPUS */
10243 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
10245 SDA_setSharedMemory4Send(unsigned int buffer_id,
10246 unsigned char *buffer, unsigned int buffer_size,
10247 unsigned int packet_size, unsigned int headroom_size)
10249 dhd_info_t *dhd = dhd_global;
10251 sda_packet_length = packet_size;
10259 SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
10261 dhd_info_t *dhd = dhd_global;
10270 SDA_getTsf(unsigned char vif_id)
10272 dhd_info_t *dhd = dhd_global;
10274 char buf[WLC_IOCTL_SMLEN];
10282 memset(buf, 0, sizeof(buf));
10284 if (vif_id == 0) /* wlan0 tsf */
10285 ifidx = dhd_ifname2idx(dhd, "wlan0");
10286 else if (vif_id == 1) /* p2p0 tsf */
10287 ifidx = dhd_ifname2idx(dhd, "p2p0");
10289 bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));
10291 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
10292 DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
10296 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
10297 tsf_val = (uint64)tsf_buf.high;
10298 DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
10299 __FUNCTION__, tsf_buf.high, tsf_buf.low));
10301 return ((tsf_val << 32) | tsf_buf.low);
10303 EXPORT_SYMBOL(SDA_getTsf);
/*
 * Body of SDA_syncTsf (the signature line was lost in this extraction;
 * the symbol is exported below as SDA_syncTsf).
 *
 * Visible behavior: sets the "wa_tsf_sync" firmware iovar (4-byte value
 * from a 'tsf_sync' variable declared on a missing line) on interface 0
 * via WLC_SET_VAR. The ioctl return value is not checked here.
 */
10308 dhd_info_t *dhd = dhd_global;
10310 char iovbuf[WLC_IOCTL_SMLEN];
10312 bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
10313 dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
10315 DHD_TRACE(("%s\n", __FUNCTION__));
10319 extern struct net_device *wl0dot1_dev;
/*
 * SDA_function4Send() - fast-path transmit of a WLANAUDIO shared-memory
 * packet.
 *
 * Visible behavior:
 *  - Optionally filters the packet if its Ethernet destination matches an
 *    entry in the driver's wlanaudio blacklist.
 *  - Allocates an skb sized from sda_packet_length (set by
 *    SDA_setSharedMemory4Send) with room for TX headers, copies the
 *    payload starting at the packet's headroom, and submits it through
 *    dhd_start_xmit() on the wl0.1 net device at 802.1d VO priority.
 *  - Marks the shared-memory descriptor consumed (ready_to_copy = 0) so
 *    the producer can reuse the slot.
 *
 * NOTE(review): extraction gaps — return type, braces, the skb NULL-check
 * after __dev_alloc_skb, the blacklist-hit action, the cnt_t periodic
 * branch body, and the return value are all missing from this view.
 */
10322 BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
10324 struct sk_buff *skb;
10325 sda_packet_t *shm_packet = packet;
10326 dhd_info_t *dhd = dhd_global;
10329 static unsigned int cnt_t = 1;
/* Drop audio frames destined to blacklisted peers. */
10335 if (dhd->is_wlanaudio_blist) {
10336 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
10337 if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
10338 if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
10339 shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
10345 if ((cnt_t % 10000) == 0)
10350 /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
/* GFP_ATOMIC: this path may run in a non-sleepable (fast-path) context. */
10352 skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
10354 skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
10355 skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
10356 skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
10359 skb->dev = wl0dot1_dev;
10360 shm_packet->txTsf = 0x0;
10361 shm_packet->rxTsf = 0x0;
/* Copy everything from the headroom onward out of shared memory. */
10362 memcpy(skb->data, &shm_packet->headroom,
10363 sda_packet_length - OFFSETOF(sda_packet_t, headroom));
/* Release the shared-memory slot back to the producer. */
10364 shm_packet->desc.ready_to_copy = 0;
10366 dhd_start_xmit(skb, skb->dev);
/*
 * SDA_registerCallback4Recv() - register the WLANAUDIO receive buffer /
 * callback area.
 * NOTE(review): only the signature and dhd_global fetch are visible in
 * this extraction; the use of pBufferTotal/BufferTotalSize is not shown.
 */
10370 SDA_registerCallback4Recv(unsigned char *pBufferTotal,
10371 unsigned int BufferTotalSize)
10373 dhd_info_t *dhd = dhd_global;
/*
 * SDA_setSharedMemory4Recv() - register the WLANAUDIO shared-memory
 * receive buffer with the driver.
 * NOTE(review): only the signature and dhd_global fetch are visible in
 * this extraction; how the buffer parameters are stored is not shown.
 */
10382 SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
10383 unsigned int BufferTotalSize,
10384 unsigned int BufferUnitSize,
10385 unsigned int Headroomsize)
10387 dhd_info_t *dhd = dhd_global;
/*
 * SDA_function4RecvDone() - completion hook for a consumed WLANAUDIO
 * receive buffer.
 * NOTE(review): only the signature and dhd_global fetch are visible in
 * this extraction; the buffer-recycling logic is not shown.
 */
10396 SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
10398 dhd_info_t *dhd = dhd_global;
10405 EXPORT_SYMBOL(SDA_setSharedMemory4Send);
10406 EXPORT_SYMBOL(SDA_registerCallback4SendDone);
10407 EXPORT_SYMBOL(SDA_syncTsf);
10408 EXPORT_SYMBOL(SDA_function4Send);
10409 EXPORT_SYMBOL(SDA_registerCallback4Recv);
10410 EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
10411 EXPORT_SYMBOL(SDA_function4RecvDone);
10413 #endif /* CUSTOMER_HW20 && WLANAUDIO */
10415 void *dhd_get_pub(struct net_device *dev)
10417 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
10419 return (void *)&dhdinfo->pub;