2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * $Copyright Open Broadcom Corporation$
7 * $Id: dhd_linux.c 491481 2014-07-16 14:08:43Z $
14 #include <linux/syscalls.h>
15 #include <event_log.h>
16 #endif /* SHOW_LOGTRACE */
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/random.h>
28 #include <linux/spinlock.h>
29 #include <linux/ethtool.h>
30 #include <linux/fcntl.h>
33 #include <linux/reboot.h>
34 #include <linux/notifier.h>
35 #include <net/addrconf.h>
36 #ifdef ENABLE_ADAPTIVE_SCHED
37 #include <linux/cpufreq.h>
38 #endif /* ENABLE_ADAPTIVE_SCHED */
40 #include <asm/uaccess.h>
41 #include <asm/unaligned.h>
45 #include <bcmendian.h>
48 #include <proto/ethernet.h>
49 #include <proto/bcmevent.h>
50 #include <proto/vlan.h>
51 #include <proto/bcmudp.h>
52 #include <proto/bcmdhcp.h>
54 #include <proto/bcmicmp.h>
56 #include <proto/802.3.h>
58 #include <dngl_stats.h>
59 #include <dhd_linux_wq.h>
61 #include <dhd_linux.h>
62 #ifdef PCIE_FULL_DONGLE
63 #include <dhd_flowring.h>
66 #include <dhd_proto.h>
67 #include <dhd_config.h>
69 #ifdef CONFIG_HAS_WAKELOCK
70 #include <linux/wakelock.h>
73 #include <wl_cfg80211.h>
79 #include <proto/802.11_bta.h>
80 #include <proto/bt_amp_hci.h>
85 #include <linux/compat.h>
89 #include <dhd_wmf_linux.h>
92 #ifdef AMPDU_VO_ENABLE
93 #include <proto/802.1d.h>
94 #endif /* AMPDU_VO_ENABLE */
95 #ifdef DHDTCPACK_SUPPRESS
97 #endif /* DHDTCPACK_SUPPRESS */
99 #if defined(DHD_TCP_WINSIZE_ADJUST)
100 #include <linux/tcp.h>
102 #endif /* DHD_TCP_WINSIZE_ADJUST */
105 #include <linux/time.h>
108 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
109 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
110 #define TSMAX 1000 /* max no. of timing record kept */
113 static uint32 tsidx = 0;
114 static uint32 htsf_seqnum = 0;
116 struct timeval tsync;
117 static uint32 tsport = 5010;
119 typedef struct histo_ {
123 #if !ISPOWEROF2(DHD_SDALIGN)
124 #error DHD_SDALIGN is not a power of 2!
127 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
128 #endif /* WLMEDIA_HTSF */
130 #if defined(DHD_TCP_WINSIZE_ADJUST)
131 #define MIN_TCP_WIN_SIZE 18000
132 #define WIN_SIZE_SCALE_FACTOR 2
133 #define MAX_TARGET_PORTS 5
135 static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
136 static uint dhd_use_tcp_window_size_adjust = FALSE;
137 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
138 #endif /* DHD_TCP_WINSIZE_ADJUST */
142 extern bool ap_cfg_running;
143 extern bool ap_fw_loaded;
147 #ifdef ENABLE_ADAPTIVE_SCHED
148 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
149 #ifndef CUSTOM_CPUFREQ_THRESH
150 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
151 #endif /* CUSTOM_CPUFREQ_THRESH */
152 #endif /* ENABLE_ADAPTIVE_SCHED */
154 /* enable HOSTIP cache update from the host side when an eth0:N is up */
155 #define AOE_IP_ALIAS_SUPPORT 1
159 #include <bcm_rpc_tp.h>
162 #include <wlfc_proto.h>
163 #include <dhd_wlfc.h>
166 #include <wl_android.h>
168 /* Maximum STA per radio */
169 #define DHD_MAX_STA 32
172 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
173 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
174 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
176 #ifdef ARP_OFFLOAD_SUPPORT
177 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
178 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
179 unsigned long event, void *ptr);
180 static struct notifier_block dhd_inetaddr_notifier = {
181 .notifier_call = dhd_inetaddr_notifier_call
183 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
184 * created in kernel notifier link list (with 'next' pointing to itself)
186 static bool dhd_inetaddr_notifier_registered = FALSE;
187 #endif /* ARP_OFFLOAD_SUPPORT */
190 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
191 unsigned long event, void *ptr);
192 static struct notifier_block dhd_inet6addr_notifier = {
193 .notifier_call = dhd_inet6addr_notifier_call
195 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
196 * created in kernel notifier link list (with 'next' pointing to itself)
198 static bool dhd_inet6addr_notifier_registered = FALSE;
201 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
202 #include <linux/suspend.h>
203 volatile bool dhd_mmc_suspend = FALSE;
204 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
205 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
207 #if defined(OOB_INTR_ONLY)
208 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
210 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
211 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
213 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
214 MODULE_LICENSE("GPL v2");
215 #endif /* LinuxVer */
220 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
222 #ifndef PROP_TXSTATUS
223 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
225 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
227 #endif /* BCM_FD_AGGR */
230 extern bool dhd_wlfc_skip_fc(void);
231 extern void dhd_wlfc_plat_init(void *dhd);
232 extern void dhd_wlfc_plat_deinit(void *dhd);
233 #endif /* PROP_TXSTATUS */
235 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
241 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
243 /* Linux wireless extension support */
244 #if defined(WL_WIRELESS_EXT)
246 extern wl_iw_extra_params_t g_wl_iw_params;
247 #endif /* defined(WL_WIRELESS_EXT) */
249 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
250 #include <linux/earlysuspend.h>
251 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
253 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
255 #ifdef PKT_FILTER_SUPPORT
256 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
257 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
258 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
263 extern int dhd_read_macaddr(struct dhd_info *dhd);
265 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
268 extern int dhd_write_macaddr(struct ether_addr *mac);
270 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
273 #if defined(SOFTAP_TPUT_ENHANCE)
274 extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
275 extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
276 #endif /* SOFTAP_TPUT_ENHANCE */
279 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
280 static struct notifier_block dhd_reboot_notifier = {
281 .notifier_call = dhd_reboot_callback,
286 typedef struct dhd_if_event {
287 struct list_head list;
288 wl_event_data_if_t event;
289 char name[IFNAMSIZ+1];
290 uint8 mac[ETHER_ADDR_LEN];
293 /* Interface control information */
294 typedef struct dhd_if {
295 struct dhd_info *info; /* back pointer to dhd_info */
296 /* OS/stack specifics */
297 struct net_device *net;
298 int idx; /* iface idx in dongle */
299 uint subunit; /* subunit */
300 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
303 uint8 bssidx; /* bsscfg index for the interface */
304 bool attached; /* Delayed attachment when unset */
305 bool txflowcontrol; /* Per interface flow control indicator */
306 char name[IFNAMSIZ+1]; /* linux interface name */
307 struct net_device_stats stats;
309 dhd_wmf_t wmf; /* per bsscfg wmf setting */
311 #ifdef PCIE_FULL_DONGLE
312 struct list_head sta_list; /* sll of associated stations */
313 #if !defined(BCM_GMAC3)
314 spinlock_t sta_list_lock; /* lock for manipulating sll */
315 #endif /* ! BCM_GMAC3 */
316 #endif /* PCIE_FULL_DONGLE */
317 uint32 ap_isolate; /* ap-isolation settings */
330 uint32 coef; /* scaling factor */
331 uint32 coefdec1; /* first decimal */
332 uint32 coefdec2; /* second decimal */
342 static tstamp_t ts[TSMAX];
343 static tstamp_t maxdelayts;
344 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
346 #endif /* WLMEDIA_HTSF */
348 struct ipv6_work_info_t {
354 /* When Perimeter locks are deployed, any blocking calls must be preceded
355 * with a PERIM UNLOCK and followed by a PERIM LOCK.
356 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
357 * wait_event_timeout().
360 /* Local private structure (extension of pub) */
361 typedef struct dhd_info {
362 #if defined(WL_WIRELESS_EXT)
363 wl_iw_t iw; /* wireless extensions state (must be first) */
364 #endif /* defined(WL_WIRELESS_EXT) */
366 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
368 void *adapter; /* adapter information, interrupt, fw path etc. */
369 char fw_path[PATH_MAX]; /* path to firmware image */
370 char nv_path[PATH_MAX]; /* path to nvram vars file */
371 char conf_path[PATH_MAX]; /* path to config vars file */
373 struct semaphore proto_sem;
375 spinlock_t wlfc_spinlock;
377 #endif /* PROP_TXSTATUS */
381 wait_queue_head_t ioctl_resp_wait;
382 uint32 default_wd_interval;
384 struct timer_list timer;
386 struct tasklet_struct tasklet;
391 struct semaphore sdsem;
392 tsk_ctl_t thr_dpc_ctl;
393 tsk_ctl_t thr_wdt_ctl;
395 tsk_ctl_t thr_rxf_ctl;
397 bool rxthread_enabled;
400 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
401 struct wake_lock wl_wifi; /* Wifi wakelock */
402 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
403 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
404 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
407 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
408 /* net_device interface lock, prevent race conditions among net_dev interface
409 * calls and wifi_on or wifi_off
411 struct mutex dhd_net_if_mutex;
412 struct mutex dhd_suspend_mutex;
414 spinlock_t wakelock_spinlock;
415 uint32 wakelock_counter;
416 int wakelock_wd_counter;
417 int wakelock_rx_timeout_enable;
418 int wakelock_ctrl_timeout_enable;
420 uint32 wakelock_before_waive;
422 /* Thread to issue ioctl for multicast */
423 wait_queue_head_t ctrl_wait;
424 atomic_t pend_8021x_cnt;
425 dhd_attach_states_t dhd_state;
427 dhd_event_log_t event_data;
428 #endif /* SHOW_LOGTRACE */
430 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
431 struct early_suspend early_suspend;
432 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
434 #ifdef ARP_OFFLOAD_SUPPORT
436 #endif /* ARP_OFFLOAD_SUPPORT */
440 struct timer_list rpcth_timer;
441 bool rpcth_timer_active;
444 #ifdef DHDTCPACK_SUPPRESS
445 spinlock_t tcpack_lock;
446 #endif /* DHDTCPACK_SUPPRESS */
447 void *dhd_deferred_wq;
448 #ifdef DEBUG_CPU_FREQ
449 struct notifier_block freq_trans;
450 int __percpu *new_freq;
453 struct notifier_block pm_notifier;
456 #define DHDIF_FWDER(dhdif) FALSE
458 /* Flag to indicate if we should download firmware on driver load */
459 uint dhd_download_fw_on_driverload = TRUE;
461 /* Definitions to provide path to the firmware and nvram
462 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
464 char firmware_path[MOD_PARAM_PATHLEN];
465 char nvram_path[MOD_PARAM_PATHLEN];
466 char config_path[MOD_PARAM_PATHLEN];
468 /* backup buffer for firmware and nvram path */
469 char fw_bak_path[MOD_PARAM_PATHLEN];
470 char nv_bak_path[MOD_PARAM_PATHLEN];
472 /* information string to keep firmware, chip, chip revision version info visible from log */
473 char info_string[MOD_PARAM_INFOLEN];
474 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
476 int disable_proptx = 0;
477 module_param(op_mode, int, 0644);
478 extern int wl_control_wl_start(struct net_device *dev);
479 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
480 struct semaphore dhd_registration_sem;
481 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
483 /* deferred handlers */
484 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
485 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
486 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
487 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
489 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
493 extern void dhd_netdev_free(struct net_device *ndev);
494 #endif /* WL_CFG80211 */
497 module_param(dhd_msg_level, int, 0);
498 #if defined(WL_WIRELESS_EXT)
499 module_param(iw_msg_level, int, 0);
502 module_param(wl_dbg_level, int, 0);
504 module_param(android_msg_level, int, 0);
505 module_param(config_msg_level, int, 0);
507 #ifdef ARP_OFFLOAD_SUPPORT
508 /* ARP offload enable */
509 uint dhd_arp_enable = TRUE;
510 module_param(dhd_arp_enable, uint, 0);
512 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
514 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
516 module_param(dhd_arp_mode, uint, 0);
517 #endif /* ARP_OFFLOAD_SUPPORT */
519 /* Disable Prop tx */
520 module_param(disable_proptx, int, 0644);
521 /* load firmware and/or nvram values from the filesystem */
522 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
523 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
524 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
526 /* Watchdog interval */
528 /* extend watchdog expiration to 2 seconds when DPC is running */
529 #define WATCHDOG_EXTEND_INTERVAL (2000)
531 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
532 module_param(dhd_watchdog_ms, uint, 0);
534 #if defined(DHD_DEBUG)
535 /* Console poll interval */
536 uint dhd_console_ms = 0;
537 module_param(dhd_console_ms, uint, 0644);
538 #endif /* defined(DHD_DEBUG) */
541 uint dhd_slpauto = TRUE;
542 module_param(dhd_slpauto, uint, 0);
544 #ifdef PKT_FILTER_SUPPORT
545 /* Global Pkt filter enable control */
546 uint dhd_pkt_filter_enable = TRUE;
547 module_param(dhd_pkt_filter_enable, uint, 0);
550 /* Pkt filter init setup */
551 uint dhd_pkt_filter_init = 0;
552 module_param(dhd_pkt_filter_init, uint, 0);
554 /* Pkt filter mode control */
555 uint dhd_master_mode = FALSE;
556 module_param(dhd_master_mode, uint, 0);
558 int dhd_watchdog_prio = 0;
559 module_param(dhd_watchdog_prio, int, 0);
561 /* DPC thread priority */
562 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
563 module_param(dhd_dpc_prio, int, 0);
565 /* RX frame thread priority */
566 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
567 module_param(dhd_rxf_prio, int, 0);
569 #if !defined(BCMDHDUSB)
570 extern int dhd_dongle_ramsize;
571 module_param(dhd_dongle_ramsize, int, 0);
572 #endif /* BCMDHDUSB */
574 /* Keep track of number of instances */
575 static int dhd_found = 0;
576 static int instance_base = 0; /* Starting instance number */
577 module_param(instance_base, int, 0644);
580 /* DHD Perimeter lock only used in router with bypass forwarding. */
581 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
582 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
583 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
584 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
585 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
587 #ifdef PCIE_FULL_DONGLE
588 #if defined(BCM_GMAC3)
589 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
590 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
591 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
592 #else /* ! BCM_GMAC3 */
593 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
594 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
595 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
596 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
597 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
598 #endif /* ! BCM_GMAC3 */
599 #endif /* PCIE_FULL_DONGLE */
601 /* Control fw roaming */
603 uint dhd_roam_disable = 0;
605 uint dhd_roam_disable = 0;
608 /* Control radio state */
609 uint dhd_radio_up = 1;
611 /* Network interface name */
612 char iface_name[IFNAMSIZ] = {'\0'};
613 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
615 /* The following are specific to the SDIO dongle */
617 /* IOCTL response timeout */
618 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
620 /* Idle timeout for backplane clock */
621 int dhd_idletime = DHD_IDLETIME_TICKS;
622 module_param(dhd_idletime, int, 0);
625 uint dhd_poll = FALSE;
626 module_param(dhd_poll, uint, 0);
629 uint dhd_intr = TRUE;
630 module_param(dhd_intr, uint, 0);
632 /* SDIO Drive Strength (in milliamps) */
633 uint dhd_sdiod_drive_strength = 6;
634 module_param(dhd_sdiod_drive_strength, uint, 0);
638 extern uint dhd_txbound;
639 extern uint dhd_rxbound;
640 module_param(dhd_txbound, uint, 0);
641 module_param(dhd_rxbound, uint, 0);
643 /* Deferred transmits */
644 extern uint dhd_deferred_tx;
645 module_param(dhd_deferred_tx, uint, 0);
648 extern void dhd_dbg_init(dhd_pub_t *dhdp);
649 extern void dhd_dbg_remove(void);
650 #endif /* BCMDBGFS */
656 /* Echo packet generator (pkts/s) */
658 module_param(dhd_pktgen, uint, 0);
660 /* Echo packet len (0 => sawtooth, max 2040) */
661 uint dhd_pktgen_len = 0;
662 module_param(dhd_pktgen_len, uint, 0);
665 #if defined(BCMSUP_4WAY_HANDSHAKE)
666 /* Use in dongle supplicant for 4-way handshake */
667 uint dhd_use_idsup = 0;
668 module_param(dhd_use_idsup, uint, 0);
669 #endif /* BCMSUP_4WAY_HANDSHAKE */
671 extern char dhd_version[];
673 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
674 static void dhd_net_if_lock_local(dhd_info_t *dhd);
675 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
676 static void dhd_suspend_lock(dhd_pub_t *dhdp);
677 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
680 void htsf_update(dhd_info_t *dhd, void *data);
681 tsf_t prev_tsf, cur_tsf;
683 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
684 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
685 static void dhd_dump_latency(void);
686 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
687 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
688 static void dhd_dump_htsfhisto(histo_t *his, char *s);
689 #endif /* WLMEDIA_HTSF */
691 /* Monitor interface */
692 int dhd_monitor_init(void *dhd_pub);
693 int dhd_monitor_uninit(void);
696 #if defined(WL_WIRELESS_EXT)
697 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
698 #endif /* defined(WL_WIRELESS_EXT) */
700 static void dhd_dpc(ulong data);
702 extern int dhd_wait_pend8021x(struct net_device *dev);
703 void dhd_os_wd_timer_extend(void *bus, bool extend);
707 #error TOE requires BDC
709 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
710 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
713 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
714 wl_event_msg_t *event_ptr, void **data_ptr);
715 #ifdef DHD_UNICAST_DHCP
716 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
717 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
718 int *len_ptr, uint8 *prot_ptr);
719 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
720 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
722 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
723 #endif /* DHD_UNICAST_DHCP */
725 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
727 #if defined(CONFIG_PM_SLEEP)
/*
 * Kernel power-management notifier callback.
 * NOTE(review): the switch (action) statement and several case bodies are
 * not visible in this excerpt; the case labels below imply a switch on
 * 'action' that sets 'suspend' TRUE for the *_PREPARE events and FALSE for
 * the POST_* events -- confirm against upstream dhd_linux.c.
 */
728 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
730 int ret = NOTIFY_DONE;
731 bool suspend = FALSE;
/* Recover the owning dhd_info from the embedded pm_notifier member. */
732 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
734 BCM_REFERENCE(dhdinfo);
736 case PM_HIBERNATION_PREPARE:
737 case PM_SUSPEND_PREPARE:
740 case PM_POST_HIBERNATION:
741 case PM_POST_SUSPEND:
746 #if defined(SUPPORT_P2P_GO_PS)
/* Waive the wakelock around the wlfc suspend call so the driver does not
 * hold the system awake during the suspend transition; resume path calls
 * dhd_wlfc_resume() instead.
 */
749 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
750 dhd_wlfc_suspend(&dhdinfo->pub);
751 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
753 dhd_wlfc_resume(&dhdinfo->pub);
755 #endif /* defined(SUPPORT_P2P_GO_PS) */
/* Kernels 2.6.27..2.6.39 track the MMC suspend state in a driver global. */
757 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
758 KERNEL_VERSION(2, 6, 39))
759 dhd_mmc_suspend = suspend;
766 static struct notifier_block dhd_pm_notifier = {
767 .notifier_call = dhd_pm_callback,
770 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
771 * created in kernel notifier link list (with 'next' pointing to itself)
773 static bool dhd_pm_notifier_registered = FALSE;
775 extern int register_pm_notifier(struct notifier_block *nb);
776 extern int unregister_pm_notifier(struct notifier_block *nb);
777 #endif /* CONFIG_PM_SLEEP */
779 /* Request scheduling of the bus rx frame */
780 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
781 static void dhd_os_rxflock(dhd_pub_t *pub);
782 static void dhd_os_rxfunlock(dhd_pub_t *pub);
784 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
785 typedef struct dhd_dev_priv {
786 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
787 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
788 int ifidx; /* interface index */
791 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
792 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
793 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
794 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
795 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
797 /** Clear the dhd net_device's private structure. */
/* Resets the cached dhd_info/dhd_if pointers to NULL and marks the
 * interface index invalid (DHD_BAD_IF) so stale lookups through
 * DHD_DEV_INFO()/DHD_DEV_IFP()/DHD_DEV_IFIDX() cannot return live data.
 */
799 dhd_dev_priv_clear(struct net_device * dev)
801 dhd_dev_priv_t * dev_priv;
802 ASSERT(dev != (struct net_device *)NULL);
803 dev_priv = DHD_DEV_PRIV(dev);
804 dev_priv->dhd = (dhd_info_t *)NULL;
805 dev_priv->ifp = (dhd_if_t *)NULL;
806 dev_priv->ifidx = DHD_BAD_IF;
809 /** Setup the dhd net_device's private structure. */
/* Caches the dhd_info/dhd_if back-pointers and interface index inside the
 * netdev private area for O(1) retrieval via the DHD_DEV_* macros.
 * NOTE(review): the assignments of dev_priv->dhd and dev_priv->ifp are not
 * visible in this excerpt -- confirm they are present in the full source.
 */
811 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
814 dhd_dev_priv_t * dev_priv;
815 ASSERT(dev != (struct net_device *)NULL);
816 dev_priv = DHD_DEV_PRIV(dev);
819 dev_priv->ifidx = ifidx;
822 #ifdef PCIE_FULL_DONGLE
824 /** Dummy objects are defined with state representing bad|down.
825 * Performance gains from reducing branch conditionals, instruction parallelism,
826 * dual issue, reducing load shadows, avail of larger pipelines.
827 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
828 * is accessed via the dhd_sta_t.
831 /* Dummy dhd_info object */
/* The three "null" objects below stand in for bad/down state so hot-path
 * code can dereference dhd_sta_t back-pointers unconditionally instead of
 * branching on NULL (see the rationale comment above this block).
 */
832 dhd_info_t dhd_info_null = {
833 #if defined(BCM_GMAC3)
837 .info = &dhd_info_null,
838 #ifdef DHDTCPACK_SUPPRESS
839 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
840 #endif /* DHDTCPACK_SUPPRESS */
841 .up = FALSE, .busstate = DHD_BUS_DOWN
844 #define DHD_INFO_NULL (&dhd_info_null)
845 #define DHD_PUB_NULL (&dhd_info_null.pub)
847 /* Dummy netdevice object */
/* Marked unregistered so netdev-state checks treat it as not usable. */
848 struct net_device dhd_net_dev_null = {
849 .reg_state = NETREG_UNREGISTERED
851 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
853 /* Dummy dhd_if object */
/* Chains to the null dhd_info and null netdev above. */
854 dhd_if_t dhd_if_null = {
855 #if defined(BCM_GMAC3)
859 .wmf = { .wmf_enable = TRUE },
861 .info = DHD_INFO_NULL,
862 .net = DHD_NET_DEV_NULL,
865 #define DHD_IF_NULL (&dhd_if_null)
867 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
869 /** Interface STA list management. */
871 /** Fetch the dhd_if object, given the interface index in the dhd. */
872 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
874 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
875 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
876 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
878 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
879 static void dhd_if_del_sta_list(dhd_if_t * ifp);
880 static void dhd_if_flush_sta(dhd_if_t * ifp);
882 /* Construct/Destruct a sta pool. */
883 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
884 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
887 /* Return interface pointer */
888 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
890 ASSERT(ifidx < DHD_MAX_IFS);
891 return dhdp->info->iflist[ifidx];
894 /** Reset a dhd_sta object and free into the dhd pool. */
/* Returns the station id to the id16 allocator, invalidates all per-prio
 * flow ids, detaches the entry from any interface, and marks the slot free
 * (idx == ID16_INVALID). The object itself stays inside the pre-allocated
 * sta_pool; nothing is released to the heap here.
 */
896 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
900 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
902 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
903 id16_map_free(dhdp->staid_allocator, sta->idx);
904 for (prio = 0; prio < (int)NUMPRIO; prio++)
905 sta->flowid[prio] = FLOWID_INVALID;
/* Point at the dummy dhd_if so a stale back-pointer stays dereferenceable. */
906 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
907 sta->ifidx = DHD_BAD_IF;
908 bzero(sta->ea.octet, ETHER_ADDR_LEN);
909 INIT_LIST_HEAD(&sta->list);
910 sta->idx = ID16_INVALID; /* implying free */
913 /** Allocate a dhd_sta object from the dhd pool. */
/* Grabs a free station id from the id16 allocator; the id doubles as the
 * index into the pre-allocated sta_pool table, so no heap allocation
 * happens per station. A slot handed out must look freshly freed
 * (asserted below). NOTE(review): the error-return after the DHD_ERROR and
 * the final 'return sta;' are not visible in this excerpt.
 */
915 dhd_sta_alloc(dhd_pub_t * dhdp)
919 dhd_sta_pool_t * sta_pool;
921 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
923 idx = id16_map_alloc(dhdp->staid_allocator);
924 if (idx == ID16_INVALID) {
925 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
929 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
930 sta = &sta_pool[idx];
932 ASSERT((sta->idx == ID16_INVALID) &&
933 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
934 sta->idx = idx; /* implying allocated */
939 /** Delete all STAs in an interface's STA list. */
/* Walks the interface's sta_list under the list lock and frees every
 * entry back to the pool. The _safe iterator variant is required because
 * list_del() removes the current node while iterating. On GMAC3 builds the
 * station is also deassociated from the WOFA forwarder first.
 */
941 dhd_if_del_sta_list(dhd_if_t *ifp)
943 dhd_sta_t *sta, *next;
946 DHD_IF_STA_LIST_LOCK(ifp, flags);
948 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
949 #if defined(BCM_GMAC3)
951 /* Remove sta from WOFA forwarder. */
952 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
954 #endif /* BCM_GMAC3 */
955 list_del(&sta->list);
956 dhd_sta_free(&ifp->info->pub, sta);
959 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
964 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
/* Only meaningful on BCM_GMAC3 builds with a live forwarder handle; on all
 * other builds this compiles to an empty function. Unlike
 * dhd_if_del_sta_list(), entries are only flushed from the forwarder,
 * not freed back to the sta pool.
 */
966 dhd_if_flush_sta(dhd_if_t * ifp)
968 #if defined(BCM_GMAC3)
970 if (ifp && (ifp->fwdh != FWDER_NULL)) {
971 dhd_sta_t *sta, *next;
974 DHD_IF_STA_LIST_LOCK(ifp, flags);
976 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
977 /* Remove any sta entry from WOFA forwarder. */
978 fwder_flush(ifp->fwdh, (wofa_t)sta);
981 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
983 #endif /* BCM_GMAC3 */
986 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
/* Builds the id16 allocator (ids 1..max_sta; id 0 is reserved) and a
 * zeroed table of max_sta + 1 dhd_sta_t slots indexed by station id.
 * Every slot is first assigned an id and then released through
 * dhd_sta_free(), which stamps the "free" invariants each allocation
 * asserts later. On allocator or table allocation failure the partially
 * constructed state is torn down (error-return lines fall outside this
 * excerpt).
 */
988 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
990 int idx, sta_pool_memsz;
992 dhd_sta_pool_t * sta_pool;
993 void * staid_allocator;
995 ASSERT(dhdp != (dhd_pub_t *)NULL);
996 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
998 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
999 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1000 if (staid_allocator == NULL) {
1001 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1005 /* Pre allocate a pool of dhd_sta objects (one extra). */
1006 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1007 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1008 if (sta_pool == NULL) {
1009 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1010 id16_map_fini(dhdp->osh, staid_allocator);
1014 dhdp->sta_pool = sta_pool;
1015 dhdp->staid_allocator = staid_allocator;
1017 /* Initialize all sta(s) for the pre-allocated free pool. */
1018 bzero((uchar *)sta_pool, sta_pool_memsz);
1019 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1020 sta = &sta_pool[idx];
1021 sta->idx = id16_map_alloc(staid_allocator);
1022 ASSERT(sta->idx <= max_sta);
1024 /* Now place them into the pre-allocated free pool. */
1025 for (idx = 1; idx <= max_sta; idx++) {
1026 sta = &sta_pool[idx];
1027 dhd_sta_free(dhdp, sta);
1033 /** Destruct the pool of dhd_sta_t objects.
1034 * Caller must ensure that no STA objects are currently associated with an if.
/* Asserts every slot is free (caller's responsibility), releases the pool
 * memory, then tears down the id16 allocator. Pointers are NULLed so a
 * double fini or late alloc trips the init-time asserts instead of
 * touching freed memory.
 */
1037 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1039 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1043 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1044 for (idx = 1; idx <= max_sta; idx++) {
1045 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1046 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1048 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1049 dhdp->sta_pool = NULL;
1052 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1053 dhdp->staid_allocator = NULL;
1056 /** Find STA with MAC address ea in an interface's STA list. */
/* Linear scan of the interface's sta_list under the list lock, comparing
 * ETHER_ADDR_LEN bytes of 'ea'. Returns DHD_STA_NULL when no entry
 * matches (the matching-entry return falls outside this excerpt). The
 * lock is released on both the match and no-match paths.
 */
1058 dhd_find_sta(void *pub, int ifidx, void *ea)
1062 unsigned long flags;
1065 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1067 DHD_IF_STA_LIST_LOCK(ifp, flags);
1069 list_for_each_entry(sta, &ifp->sta_list, list) {
1070 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1071 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1076 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1078 return DHD_STA_NULL;
1081 /** Add STA into the interface's STA list. */
/* Allocates a dhd_sta from the pool, records the peer MAC, and appends it
 * to the interface's sta_list under the list lock. On GMAC3 builds the
 * station is also registered with the WOFA forwarder. Returns DHD_STA_NULL
 * if the pool is exhausted. No duplicate check is done here -- that is
 * dhd_findadd_sta()'s job.
 */
1083 dhd_add_sta(void *pub, int ifidx, void *ea)
1087 unsigned long flags;
1090 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1092 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1093 if (sta == DHD_STA_NULL) {
1094 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1095 return DHD_STA_NULL;
1098 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1100 /* link the sta and the dhd interface */
1103 INIT_LIST_HEAD(&sta->list);
1105 DHD_IF_STA_LIST_LOCK(ifp, flags);
1107 list_add_tail(&sta->list, &ifp->sta_list);
1109 #if defined(BCM_GMAC3)
/* fwder keys on 16-bit words of the MAC, hence the alignment check. */
1111 ASSERT(ISALIGNED(ea, 2));
1112 /* Add sta to WOFA forwarder. */
1113 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1115 #endif /* BCM_GMAC3 */
1117 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1122 /** Delete STA from the interface's STA list. */
/* Finds the entry matching 'ea' under the list lock, deassociates it from
 * the WOFA forwarder on GMAC3 builds, unlinks it, and frees it back to
 * the sta pool. The _safe iterator permits list_del() mid-iteration.
 */
1124 dhd_del_sta(void *pub, int ifidx, void *ea)
1126 dhd_sta_t *sta, *next;
1128 unsigned long flags;
1131 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1133 DHD_IF_STA_LIST_LOCK(ifp, flags);
1135 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1136 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1137 #if defined(BCM_GMAC3)
1138 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1139 ASSERT(ISALIGNED(ea, 2));
1140 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1142 #endif /* BCM_GMAC3 */
1143 list_del(&sta->list);
1144 dhd_sta_free(&ifp->info->pub, sta);
1148 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1153 /** Add STA if it doesn't exist. Not reentrant. */
/* Lookup-then-insert convenience wrapper: the two steps are not performed
 * under one lock, hence the "not reentrant" caveat -- concurrent callers
 * could both miss the lookup and insert duplicates.
 */
1155 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1159 sta = dhd_find_sta(pub, ifidx, ea);
1163 sta = dhd_add_sta(pub, ifidx, ea);
/* Non-PCIE_FULL_DONGLE builds: per-interface STA tracking is unused, so
 * provide no-op stubs with the same signatures to keep callers free of
 * #ifdefs. dhd_findadd_sta() reporting NULL means "no STA object".
 */
1169 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
1170 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1171 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1172 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1173 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1174 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1175 #endif /* PCIE_FULL_DONGLE */
1178 /* Returns dhd iflist index corresponding to the bssidx provided by apps */
/* The bsscfg index reported by the firmware/apps need not equal the slot
 * in dhd->iflist[], so scan all DHD_MAX_IFS slots for a matching
 * ifp->bssidx. The return statements fall outside this excerpt.
 */
1179 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1182 dhd_info_t *dhd = dhdp->info;
1185 ASSERT(bssidx < DHD_MAX_IFS);
1188 for (i = 0; i < DHD_MAX_IFS; i++) {
1189 ifp = dhd->iflist[i];
1190 if (ifp && (ifp->bssidx == bssidx)) {
1191 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1192 ifp->name, bssidx, i));
/* Enqueue one received skb into the fixed-size ring (dhdp->skbbuf[]) that
 * the rx-frame thread drains via dhd_rxf_dequeue(). Indices wrap with
 * '& (MAXSKBPEND - 1)', which requires MAXSKBPEND to be a power of two.
 * A non-NULL slot at store_idx means the ring is full: depending on
 * RXF_DEQUEUE_ON_BUSY the condition is either traced or logged as an
 * error (the associated return paths fall outside this excerpt).
 */
1199 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1205 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1209 dhd_os_rxflock(dhdp);
1210 store_idx = dhdp->store_idx;
1211 sent_idx = dhdp->sent_idx;
1212 if (dhdp->skbbuf[store_idx] != NULL) {
1213 /* Make sure the previous packets are processed */
1214 dhd_os_rxfunlock(dhdp);
1215 #ifdef RXF_DEQUEUE_ON_BUSY
1216 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1217 skb, store_idx, sent_idx));
1219 #else /* RXF_DEQUEUE_ON_BUSY */
1220 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1221 skb, store_idx, sent_idx));
1222 /* removed msleep here, should use wait_event_timeout if we
1223 * want to give rx frame thread a chance to run
1225 #if defined(WAIT_DEQUEUE)
1229 #endif /* RXF_DEQUEUE_ON_BUSY */
1231 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1232 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
/* Publish the skb, then advance the producer index under the rxf lock. */
1233 dhdp->skbbuf[store_idx] = skb;
1234 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1235 dhd_os_rxfunlock(dhdp);
/*
 * dhd_rxf_dequeue: consumer side of the skbbuf ring. Pops the skb at
 * sent_idx (NULL means the ring is empty), clears the slot, and
 * advances the consumer cursor — all under the rxf lock.
 */
1240 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1246 dhd_os_rxflock(dhdp);
1248 store_idx = dhdp->store_idx;
1249 sent_idx = dhdp->sent_idx;
1250 skb = dhdp->skbbuf[sent_idx];
/* Empty slot: nothing was enqueued at the consumer cursor. */
1253 dhd_os_rxfunlock(dhdp);
1254 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1255 store_idx, sent_idx));
/* Release the slot so dhd_rxf_enqueue() can reuse it, then advance. */
1259 dhdp->skbbuf[sent_idx] = NULL;
1260 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1262 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1265 dhd_os_rxfunlock(dhdp);
/*
 * dhd_process_cid_mac: pre/post hook around firmware download.
 * prepost==TRUE (pre): read the platform MAC via dhd_read_macaddr();
 * prepost==FALSE (post): program it into the dongle with
 * dhd_write_macaddr().
 * NOTE(review): 'dhd' is declared only when CUSTOMER_HW10 is NOT
 * defined, yet it is used unconditionally below — presumably
 * CUSTOMER_HW10 builds provide it elsewhere; confirm against the full
 * file.
 */
1270 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1272 #ifndef CUSTOMER_HW10
1273 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1274 #endif /* !CUSTOMER_HW10 */
1276 if (prepost) { /* pre process */
1277 dhd_read_macaddr(dhd);
1278 } else { /* post process */
1279 dhd_write_macaddr(&dhd->pub.mac);
1285 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
/*
 * _turn_on_arp_filter: decide whether the ARP white-list packet filter
 * should be applied for the current operating mode. Returns TRUE for
 * IBSS mode, and for P2P GO/GC when the firmware ARP version is 1
 * (so ARP frames still reach the host).
 */
1287 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1289 bool _apply = FALSE;
1290 /* In case of IBSS mode, apply arp pkt filter */
1291 if (op_mode & DHD_FLAG_IBSS_MODE) {
1295 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1296 if ((dhd->arp_version == 1) &&
1297 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1305 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
/*
 * dhd_set_packet_filter: download every configured packet filter
 * (dhd->pktfilter[0..pktfilter_count-1]) to the firmware. Filters are
 * only installed, not enabled here — see dhd_enable_packet_filter().
 * No-op unless the module-level dhd_pkt_filter_enable knob is set.
 */
1307 void dhd_set_packet_filter(dhd_pub_t *dhd)
1309 #ifdef PKT_FILTER_SUPPORT
1312 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1313 if (dhd_pkt_filter_enable) {
1314 for (i = 0; i < dhd->pktfilter_count; i++) {
1315 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1318 #endif /* PKT_FILTER_SUPPORT */
/*
 * dhd_enable_packet_filter: enable (value==1) or disable (value==0)
 * all downloaded packet filters in firmware. Enabling is only done in
 * STA mode and not while DHCP is in progress (filters would drop the
 * broadcast DHCP exchange). The ARP filter slot is additionally gated
 * by _turn_on_arp_filter() for the current op_mode.
 */
1321 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1323 #ifdef PKT_FILTER_SUPPORT
1326 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
1327 /* 1 - Enable packet filter, only allow unicast packet to send up */
1328 /* 0 - Disable packet filter */
1329 if (dhd_pkt_filter_enable && (!value ||
1330 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
1332 for (i = 0; i < dhd->pktfilter_count; i++) {
1333 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
/* Skip turning on the ARP white-list filter when the op mode needs
 * ARP frames delivered to the host (IBSS / P2P, see helper above). */
1334 if (value && (i == DHD_ARP_FILTER_NUM) &&
1335 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1336 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1337 "val %d, cnt %d, op_mode 0x%x\n",
1338 value, i, dhd->op_mode));
1341 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1342 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1343 value, dhd_master_mode);
1346 #endif /* PKT_FILTER_SUPPORT */
/*
 * dhd_set_suspend: apply (value!=0) or revert (value==0) the
 * power-saving configuration used while the host is suspended:
 *  - PM mode: PM_MAX in suspend, PM_FAST on resume (unless
 *    SUPPORT_PM2_ONLY);
 *  - packet filters: enabled in suspend, disabled on resume;
 *  - bcn_li_dtim: raised in suspend (longer beacon-listen interval),
 *    restored to 0 on resume;
 *  - roam_off: forced to the configured suspend value, restored to
 *    dhd_roam_disable on resume;
 *  - IPv6 RA filter (nd_ra_filter_enable): on in suspend, off on
 *    resume, only when the firmware advertises 'ndoe'.
 * Runs under the suspend lock; the suspend path is only taken when the
 * kernel has actually suspended (dhd->in_suspend).
 */
1349 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1351 #ifndef SUPPORT_PM2_ONLY
1352 int power_mode = PM_MAX;
1353 #endif /* SUPPORT_PM2_ONLY */
1354 /* wl_pkt_filter_enable_t enable_parm; */
1356 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1357 uint roamvar = dhd->conf->roam_off_suspend;
1358 uint nd_ra_filter = 0;
1364 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1365 __FUNCTION__, value, dhd->in_suspend));
1367 dhd_suspend_lock(dhd);
1369 #ifdef CUSTOM_SET_CPUCORE
1370 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1371 /* set specific cpucore */
1372 dhd_set_cpucore(dhd, TRUE);
1373 #endif /* CUSTOM_SET_CPUCORE */
/* ---- Suspend path ---- */
1375 if (value && dhd->in_suspend) {
1376 #ifdef PKT_FILTER_SUPPORT
1377 dhd->early_suspended = 1;
1379 /* Kernel suspended */
1380 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
1382 #ifndef SUPPORT_PM2_ONLY
1383 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1384 sizeof(power_mode), TRUE, 0);
1385 #endif /* SUPPORT_PM2_ONLY */
1387 /* Enable packet filter, only allow unicast packet to send up */
1388 dhd_enable_packet_filter(1, dhd);
1390 /* If DTIM skip is set up as default, force it to wake
1391 * each third DTIM for better power savings. Note that
1392 * one side effect is a chance to miss BC/MC packet.
1394 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
1395 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1396 4, iovbuf, sizeof(iovbuf));
1397 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
1399 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
1401 /* Disable firmware roaming during suspend */
1402 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1403 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1404 if (FW_SUPPORTED(dhd, ndoe)) {
1405 /* enable IPv6 RA filter in firmware during suspend */
1407 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1408 iovbuf, sizeof(iovbuf));
1409 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1410 sizeof(iovbuf), TRUE, 0)) < 0)
1411 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
/* ---- Resume path: undo each of the settings above ---- */
1415 #ifdef PKT_FILTER_SUPPORT
1416 dhd->early_suspended = 0;
1418 /* Kernel resumed */
1419 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
1421 #ifndef SUPPORT_PM2_ONLY
1422 power_mode = PM_FAST;
1423 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1424 sizeof(power_mode), TRUE, 0);
1425 #endif /* SUPPORT_PM2_ONLY */
1426 #ifdef PKT_FILTER_SUPPORT
1427 /* disable pkt filter */
1428 dhd_enable_packet_filter(0, dhd);
1429 #endif /* PKT_FILTER_SUPPORT */
1431 /* restore pre-suspend setting for dtim_skip */
1432 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1433 4, iovbuf, sizeof(iovbuf));
1435 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1436 roamvar = dhd_roam_disable;
1437 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1438 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1439 if (FW_SUPPORTED(dhd, ndoe)) {
1440 /* disable IPv6 RA filter in firmware during suspend */
1442 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1443 iovbuf, sizeof(iovbuf));
1444 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1445 sizeof(iovbuf), TRUE, 0)) < 0)
1446 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1451 dhd_suspend_unlock(dhd);
/*
 * dhd_suspend_resume_helper: record the suspend state (val) in
 * dhdp->in_suspend and invoke dhd_set_suspend() — but only in STA mode
 * and only when suspend handling is not disabled (or 'force' is set).
 * Holds the OS wake lock and perimeter lock across the operation.
 */
1456 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1458 dhd_pub_t *dhdp = &dhd->pub;
1461 DHD_OS_WAKE_LOCK(dhdp);
1462 DHD_PERIM_LOCK(dhdp);
1464 /* Set flag when early suspend was called */
1465 dhdp->in_suspend = val;
1466 if ((force || !dhdp->suspend_disable_flag) &&
1467 dhd_support_sta_mode(dhdp))
1469 ret = dhd_set_suspend(val, dhdp);
1472 DHD_PERIM_UNLOCK(dhdp);
1473 DHD_OS_WAKE_UNLOCK(dhdp);
1477 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
/* Android early-suspend callback: enter the driver's suspend state
 * (val=1, not forced) via dhd_suspend_resume_helper(). */
1478 static void dhd_early_suspend(struct early_suspend *h)
1480 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1481 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1484 dhd_suspend_resume_helper(dhd, 1, 0);
/* Android late-resume callback: leave the driver's suspend state
 * (val=0, not forced), reverting the settings applied at early
 * suspend. */
1487 static void dhd_late_resume(struct early_suspend *h)
1489 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1490 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1493 dhd_suspend_resume_helper(dhd, 0, 0);
1495 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1498 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1499 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1501 * dhd_timeout_start(&tmo, usec);
1502 * while (!dhd_timeout_expired(&tmo))
1503 * if (poll_something())
1505 * if (dhd_timeout_expired(&tmo))
/* dhd_timeout_start: initialize a dhd_timeout_t with the overall limit
 * ('usec') and cache one jiffy in microseconds as the spin/sleep
 * crossover point used by dhd_timeout_expired(). */
1510 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1515 tmo->tick = jiffies_to_usecs(1);
/*
 * dhd_timeout_expired: one polling step of the timeout started by
 * dhd_timeout_start(). Returns immediately (no delay) on the first
 * call; afterwards it accumulates 'elapsed', spin-delays with the
 * increment doubling each call until it reaches one jiffy, then
 * switches to a real one-jiffy schedule_timeout() sleep. Caller loops
 * until this reports the limit has been reached.
 */
1519 dhd_timeout_expired(dhd_timeout_t *tmo)
1521 /* Does nothing the first call */
1522 if (tmo->increment == 0) {
/* Limit reached: report expiry to the caller. */
1527 if (tmo->elapsed >= tmo->limit)
1530 /* Add the delay that's about to take place */
1531 tmo->elapsed += tmo->increment;
/* Spin phase: busy-wait while sleeping is not allowed or the delay is
 * still shorter than a jiffy; back off exponentially, capped at one
 * tick. */
1533 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
1534 OSL_DELAY(tmo->increment);
1535 tmo->increment *= 2;
1536 if (tmo->increment > tmo->tick)
1537 tmo->increment = tmo->tick;
/* Sleep phase: yield the CPU for one jiffy via a private wait queue. */
1539 wait_queue_head_t delay_wait;
1540 DECLARE_WAITQUEUE(wait, current);
1541 init_waitqueue_head(&delay_wait);
1542 add_wait_queue(&delay_wait, &wait);
1543 set_current_state(TASK_INTERRUPTIBLE);
1544 (void)schedule_timeout(1);
1545 remove_wait_queue(&delay_wait, &wait);
1546 set_current_state(TASK_RUNNING);
/* dhd_net2idx: map a net_device back to its iflist index by linear
 * scan; the not-found return value is not visible in this extract
 * (presumably DHD_BAD_IF — see callers at dhd_set_mac_address()). */
1553 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1558 while (i < DHD_MAX_IFS) {
1559 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
/* dhd_idx2net: inverse of dhd_net2idx — return the net_device for a
 * given interface index, after range- and NULL-checking pub/ifidx. */
1567 struct net_device * dhd_idx2net(void *pub, int ifidx)
1569 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1570 struct dhd_info *dhd_info;
/* Guard: bad handle or out-of-range index. */
1572 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1574 dhd_info = dhd_pub->info;
1575 if (dhd_info && dhd_info->iflist[ifidx])
1576 return dhd_info->iflist[ifidx]->net;
/* dhd_ifname2idx: resolve an interface name to its iflist index.
 * Scans from DHD_MAX_IFS downward so that reaching 0 without a match
 * falls back to the primary interface. */
1581 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1583 int i = DHD_MAX_IFS;
/* Empty/NULL name: use the primary interface immediately. */
1587 if (name == NULL || *name == '\0')
1591 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1594 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1596 return i; /* default - the primary interface */
/* dhd_ifidx2hostidx: map a dongle interface index (ifp->idx) to the
 * host-side iflist index. Same downward-scan/fall-back-to-0 pattern as
 * dhd_ifname2idx() above. */
1600 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1602 int i = DHD_MAX_IFS;
1607 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1610 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1612 return i; /* default - the primary interface */
/* dhd_ifname: return the net_device name for an interface index, used
 * heavily in log messages. Bad index or missing interface paths return
 * early (fallback string not visible in this extract). */
1616 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1618 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1622 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1623 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1627 if (dhd->iflist[ifidx] == NULL) {
1628 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1632 if (dhd->iflist[ifidx]->net)
1633 return dhd->iflist[ifidx]->net->name;
1639 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1642 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1645 for (i = 0; i < DHD_MAX_IFS; i++)
1646 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1647 return dhd->iflist[i]->mac_addr;
/*
 * _dhd_set_multicast_list: push the interface's multicast state to the
 * dongle in three steps:
 *   1. "mcast_list" iovar — count + list of multicast MACs, read from
 *      the net_device under netif_addr_lock_bh (API differs across
 *      kernel versions, hence the LINUX_VERSION_CODE ladder);
 *   2. "allmulti" iovar — from IFF_ALLMULTI, forced on if the dongle
 *      rejected the explicit list;
 *   3. WLC_SET_PROMISC — from IFF_PROMISC ('allmulti' variable is
 *      reused as scratch for this value).
 * Each step MALLOCs/MFREEs its own iovar buffer and logs failures
 * without aborting the later steps.
 */
1654 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
1656 struct net_device *dev;
1657 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1658 struct netdev_hw_addr *ha;
1660 struct dev_mc_list *mclist;
1662 uint32 allmulti, cnt;
1669 ASSERT(dhd && dhd->iflist[ifidx]);
1670 dev = dhd->iflist[ifidx]->net;
/* Snapshot the multicast count under the address lock. */
1673 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1674 netif_addr_lock_bh(dev);
1676 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1677 cnt = netdev_mc_count(dev);
1679 cnt = dev->mc_count;
1680 #endif /* LINUX_VERSION_CODE */
1682 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1683 netif_addr_unlock_bh(dev);
1686 /* Determine initial value of allmulti flag */
1687 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
1689 /* Send down the multicast list first. */
/* iovar layout: "mcast_list\0" + uint32 count + cnt MAC addresses. */
1692 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
1693 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
1694 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
1695 dhd_ifname(&dhd->pub, ifidx), cnt));
1699 strncpy(bufp, "mcast_list", buflen - 1);
1700 bufp[buflen - 1] = '\0';
1701 bufp += strlen("mcast_list") + 1;
1704 memcpy(bufp, &cnt, sizeof(cnt));
1705 bufp += sizeof(cnt);
/* Copy the MAC addresses, re-taking the address lock; the pre-2.6.35
 * walk is bounded by the 'cnt' snapshot taken above. */
1708 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1709 netif_addr_lock_bh(dev);
1711 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1712 netdev_for_each_mc_addr(ha, dev) {
1715 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
1716 bufp += ETHER_ADDR_LEN;
1720 for (mclist = dev->mc_list; (mclist && (cnt > 0));
1721 cnt--, mclist = mclist->next) {
1722 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
1723 bufp += ETHER_ADDR_LEN;
1725 #endif /* LINUX_VERSION_CODE */
1727 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1728 netif_addr_unlock_bh(dev);
1731 memset(&ioc, 0, sizeof(ioc));
1732 ioc.cmd = WLC_SET_VAR;
1737 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
/* Dongle refused the list: fall back to allmulti if we had entries. */
1739 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
1740 dhd_ifname(&dhd->pub, ifidx), cnt));
1741 allmulti = cnt ? TRUE : allmulti;
1744 MFREE(dhd->pub.osh, buf, buflen);
1746 /* Now send the allmulti setting. This is based on the setting in the
1747 * net_device flags, but might be modified above to be turned on if we
1748 * were trying to set some addresses and dongle rejected it...
1751 buflen = sizeof("allmulti") + sizeof(allmulti);
1752 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
1753 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
1756 allmulti = htol32(allmulti);
1758 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
1759 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
1760 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
1761 MFREE(dhd->pub.osh, buf, buflen);
1766 memset(&ioc, 0, sizeof(ioc));
1767 ioc.cmd = WLC_SET_VAR;
1772 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1774 DHD_ERROR(("%s: set allmulti %d failed\n",
1775 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
1778 MFREE(dhd->pub.osh, buf, buflen);
1780 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
1782 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
1784 allmulti = htol32(allmulti);
1786 memset(&ioc, 0, sizeof(ioc));
1787 ioc.cmd = WLC_SET_PROMISC;
1788 ioc.buf = &allmulti;
1789 ioc.len = sizeof(allmulti);
1792 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1794 DHD_ERROR(("%s: set promisc %d failed\n",
1795 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
/*
 * _dhd_set_mac_address: program 'addr' as the interface MAC via the
 * "cur_etheraddr" iovar, then mirror it into the net_device and the
 * driver's cached dhd->pub.mac on success.
 */
1800 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
1806 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
1807 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
1810 memset(&ioc, 0, sizeof(ioc));
1811 ioc.cmd = WLC_SET_VAR;
1816 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1818 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
/* Success: keep kernel netdev and the cached driver MAC in sync. */
1820 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
1822 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
1829 extern struct net_device *ap_net_dev;
1830 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
/*
 * dhd_ifadd_event_handler: deferred-work handler for
 * DHD_WQ_WORK_IF_ADD. Allocates the virtual interface's net_device,
 * attaches a wireless_dev (cfg80211, kernels >= 3.11) that shares the
 * primary interface's wiphy, registers the netdev (perimeter lock
 * dropped across dhd_register_if), and, for PCIE full-dongle AP-role
 * interfaces, turns on firmware ap_isolate. Frees the queued
 * dhd_if_event_t when done. Runs under net-if lock, wake lock and
 * perimeter lock.
 */
1834 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
1836 dhd_info_t *dhd = handle;
1837 dhd_if_event_t *if_event = event_info;
1838 struct net_device *ndev;
1841 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
1842 struct wireless_dev *vwdev, *primary_wdev;
1843 struct net_device *primary_ndev;
1844 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
/* Sanity: this handler only services IF_ADD work items. */
1846 if (event != DHD_WQ_WORK_IF_ADD) {
1847 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1852 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
1857 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
1861 dhd_net_if_lock_local(dhd);
1862 DHD_OS_WAKE_LOCK(&dhd->pub);
1863 DHD_PERIM_LOCK(&dhd->pub);
1865 ifidx = if_event->event.ifidx;
1866 bssidx = if_event->event.bssidx;
1867 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
1869 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
1870 if_event->mac, bssidx, TRUE);
1872 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
/* cfg80211 plumbing: the virtual netdev shares the primary wiphy. */
1876 #if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
1877 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
1878 if (unlikely(!vwdev)) {
1879 WL_ERR(("Could not allocate wireless device\n"));
1882 primary_ndev = dhd->pub.info->iflist[0]->net;
1883 primary_wdev = ndev_to_wdev(primary_ndev);
1884 vwdev->wiphy = primary_wdev->wiphy;
1885 vwdev->iftype = if_event->event.role;
1886 vwdev->netdev = ndev;
1887 ndev->ieee80211_ptr = vwdev;
1888 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
1889 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
1890 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
/* register_netdev can sleep/re-enter; drop the perimeter lock around it. */
1892 DHD_PERIM_UNLOCK(&dhd->pub);
1893 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
1894 DHD_PERIM_LOCK(&dhd->pub);
1895 if (ret != BCME_OK) {
1896 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
1897 dhd_remove_if(&dhd->pub, ifidx, TRUE);
1899 #ifdef PCIE_FULL_DONGLE
1900 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
1901 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
1902 char iovbuf[WLC_IOCTL_SMLEN];
1905 memset(iovbuf, 0, sizeof(iovbuf));
1906 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
1907 dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
1909 #endif /* PCIE_FULL_DONGLE */
/* The work item owns the event payload; release it here. */
1911 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
1913 DHD_PERIM_UNLOCK(&dhd->pub);
1914 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1915 dhd_net_if_unlock_local(dhd);
/*
 * dhd_ifdel_event_handler: deferred-work handler for
 * DHD_WQ_WORK_IF_DEL. Validates the work item, removes the interface
 * via dhd_remove_if() and frees the queued dhd_if_event_t. Same
 * lock discipline as dhd_ifadd_event_handler().
 */
1919 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
1921 dhd_info_t *dhd = handle;
1923 dhd_if_event_t *if_event = event_info;
1926 if (event != DHD_WQ_WORK_IF_DEL) {
1927 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1932 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
1937 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
1941 dhd_net_if_lock_local(dhd);
1942 DHD_OS_WAKE_LOCK(&dhd->pub);
1943 DHD_PERIM_LOCK(&dhd->pub);
1945 ifidx = if_event->event.ifidx;
1946 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
1948 dhd_remove_if(&dhd->pub, ifidx, TRUE);
/* The work item owns the event payload; release it here. */
1950 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
1952 DHD_PERIM_UNLOCK(&dhd->pub);
1953 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1954 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mac_addr_handler: deferred-work handler for
 * DHD_WQ_WORK_SET_MAC, queued by dhd_set_mac_address(). Refuses to
 * change the MAC while an AP netdev exists (ap_net_dev != NULL), then
 * applies ifp->mac_addr via _dhd_set_mac_address() and clears the
 * pending flag.
 */
1958 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
1960 dhd_info_t *dhd = handle;
1961 dhd_if_t *ifp = event_info;
1963 if (event != DHD_WQ_WORK_SET_MAC) {
1964 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1968 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
1972 dhd_net_if_lock_local(dhd);
1973 DHD_OS_WAKE_LOCK(&dhd->pub);
1974 DHD_PERIM_LOCK(&dhd->pub);
/* Snapshot "are we in AP mode" under the general lock. */
1978 unsigned long flags;
1980 DHD_GENERAL_LOCK(&dhd->pub, flags);
1981 in_ap = (ap_net_dev != NULL);
1982 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
1985 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
1992 if (ifp == NULL || !dhd->pub.up) {
1993 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
/* NOTE(review): this error log precedes the actual set below; the
 * INFO log after the call reports the successful overwrite. */
1997 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
1998 ifp->set_macaddress = FALSE;
1999 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2000 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2002 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2005 DHD_PERIM_UNLOCK(&dhd->pub);
2006 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2007 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mcast_list_handler: deferred-work handler for
 * DHD_WQ_WORK_SET_MCAST_LIST, queued by dhd_set_multicast_list().
 * Blocked while an AP netdev exists (mirrors the MAC handler above);
 * otherwise pushes the interface's multicast state to the dongle via
 * _dhd_set_multicast_list().
 */
2011 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
2013 dhd_info_t *dhd = handle;
2014 dhd_if_t *ifp = event_info;
2017 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
2018 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2023 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2027 dhd_net_if_lock_local(dhd);
2028 DHD_OS_WAKE_LOCK(&dhd->pub);
2029 DHD_PERIM_LOCK(&dhd->pub);
/* Snapshot "are we in AP mode" under the general lock. */
2034 unsigned long flags;
2035 DHD_GENERAL_LOCK(&dhd->pub, flags);
2036 in_ap = (ap_net_dev != NULL);
2037 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2040 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
/* Drop the pending request; it will not be retried here. */
2042 ifp->set_multicast = FALSE;
2048 if (ifp == NULL || !dhd->pub.up) {
2049 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2056 _dhd_set_multicast_list(dhd, ifidx);
2057 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
2060 DHD_PERIM_UNLOCK(&dhd->pub);
2061 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2062 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mac_address: net_device_ops .ndo_set_mac_address entry.
 * Records the requested MAC in the interface's dhd_if_t (under the
 * net-if lock), marks it pending, and defers the actual firmware
 * update to dhd_set_mac_addr_handler() on the deferred workqueue.
 */
2066 dhd_set_mac_address(struct net_device *dev, void *addr)
2070 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2071 struct sockaddr *sa = (struct sockaddr *)addr;
2075 ifidx = dhd_net2idx(dhd, dev);
2076 if (ifidx == DHD_BAD_IF)
2079 dhdif = dhd->iflist[ifidx];
2081 dhd_net_if_lock_local(dhd);
2082 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2083 dhdif->set_macaddress = TRUE;
2084 dhd_net_if_unlock_local(dhd);
2085 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2086 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
/*
 * dhd_set_multicast_list: net_device_ops .ndo_set_rx_mode entry.
 * Runs in atomic context, so it only flags the interface and defers
 * the dongle update to dhd_set_mcast_list_handler() on the workqueue.
 */
2091 dhd_set_multicast_list(struct net_device *dev)
2093 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2096 ifidx = dhd_net2idx(dhd, dev);
2097 if (ifidx == DHD_BAD_IF)
2100 dhd->iflist[ifidx]->set_multicast = TRUE;
2101 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2102 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2105 #ifdef PROP_TXSTATUS
/* dhd_os_wlfc_block: take the WLFC (wireless flow control) spinlock;
 * bottom-half-safe variant since WLFC runs in softirq context. */
2107 dhd_os_wlfc_block(dhd_pub_t *pub)
2109 dhd_info_t *di = (dhd_info_t *)(pub->info);
2111 spin_lock_bh(&di->wlfc_spinlock);
/* dhd_os_wlfc_unblock: release the WLFC spinlock taken above. */
2116 dhd_os_wlfc_unblock(dhd_pub_t *pub)
2118 dhd_info_t *di = (dhd_info_t *)(pub->info);
2121 spin_unlock_bh(&di->wlfc_spinlock);
2125 #endif /* PROP_TXSTATUS */
/*
 * dhd_sendpkt: common transmit path below dhd_start_xmit(). Takes
 * ownership of 'pktbuf' — every early-exit path frees it. Steps:
 * reject if bus down/suspended; optional DHCP broadcast->unicast
 * conversion; tx statistics (multicast counter, pending-802.1X count);
 * optional TCP-ACK suppression; packet priority tagging; flow-ring
 * lookup (PCIE full dongle); WLFC tagging (PROP_TXSTATUS); protocol
 * header push; hand-off to the bus layer.
 */
2128 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
2131 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2132 struct ether_header *eh = NULL;
2134 /* Reject if down */
2135 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
2136 /* free the packet here since the caller won't */
2137 PKTFREE(dhdp->osh, pktbuf, TRUE);
2141 #ifdef PCIE_FULL_DONGLE
2142 if (dhdp->busstate == DHD_BUS_SUSPEND) {
2143 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
2144 PKTFREE(dhdp->osh, pktbuf, TRUE);
2147 #endif /* PCIE_FULL_DONGLE */
2149 #ifdef DHD_UNICAST_DHCP
2150 /* if dhcp_unicast is enabled, we need to convert the */
2151 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2152 if (dhdp->dhcp_unicast) {
2153 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
2155 #endif /* DHD_UNICAST_DHCP */
2156 /* Update multicast statistic */
2157 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
2158 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
2159 eh = (struct ether_header *)pktdata;
2161 if (ETHER_ISMULTI(eh->ether_dhost))
2162 dhdp->tx_multicast++;
/* Track in-flight EAPOL frames so teardown can wait for them. */
2163 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
2164 atomic_inc(&dhd->pend_8021x_cnt);
/* Runt frame (shorter than an Ethernet header): drop it. */
2166 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2170 #ifdef DHDTCPACK_SUPPRESS
2171 /* If this packet has replaced another packet and got freed, just return */
2172 if (dhd_tcpack_suppress(dhdp, pktbuf))
2174 #endif /* DHDTCPACK_SUPPRESS */
2176 /* Look into the packet and update the packet priority */
2177 #ifndef PKTPRIO_OVERRIDE
2178 if (PKTPRIO(pktbuf) == 0)
2180 pktsetprio(pktbuf, FALSE);
2183 #ifdef PCIE_FULL_DONGLE
2185 * Lkup the per interface hash table, for a matching flowring. If one is not
2186 * available, allocate a unique flowid and add a flowring entry.
2187 * The found or newly created flowid is placed into the pktbuf's tag.
2189 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
2190 if (ret != BCME_OK) {
2191 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
2196 #ifdef PROP_TXSTATUS
2197 if (dhd_wlfc_is_supported(dhdp)) {
2198 /* store the interface ID */
2199 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
2201 /* store destination MAC in the tag as well */
2202 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
2204 /* decide which FIFO this packet belongs to */
2205 if (ETHER_ISMULTI(eh->ether_dhost))
2206 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2207 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
2209 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
2211 #endif /* PROP_TXSTATUS */
2212 /* If the protocol uses a data header, apply it */
2213 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
2215 /* Use bus module to send data frame */
2217 dhd_htsf_addtxts(dhdp, pktbuf);
2219 #ifdef PROP_TXSTATUS
/* WLFC path; falls through to direct bus transmit when WLFC reports
 * WLFC_UNSUPPORTED. The BCMPCIE variant of dhd_bus_txdata() takes the
 * interface index as an extra argument. */
2221 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
2222 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
2223 /* non-proptxstatus way */
2225 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2227 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2228 #endif /* BCMPCIE */
2233 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2235 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2236 #endif /* BCMPCIE */
2237 #endif /* PROP_TXSTATUS */
/*
 * dhd_start_xmit: net_device_ops .ndo_start_xmit entry. Validates bus
 * and interface state, normalizes the skb (2-byte data alignment,
 * headroom for the bus/protocol header), converts it to a native
 * packet, optionally runs WMF multicast processing, then hands it to
 * dhd_sendpkt(). Always consumes the skb; returns NETDEV_TX_OK except
 * on the early bus-down/bad-ifidx paths which return NETDEV_TX_BUSY.
 * Wake lock and per-FWDER-unit perimeter lock are held throughout.
 */
2243 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
2248 dhd_info_t *dhd = DHD_DEV_INFO(net);
2249 dhd_if_t *ifp = NULL;
2252 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
2254 uint8 htsfdlystat_sz = 0;
2257 struct ether_header *eh;
2259 #endif /* DHD_WMF */
2261 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2263 DHD_OS_WAKE_LOCK(&dhd->pub);
2264 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2266 /* Reject if down */
2267 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
2268 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2269 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
2270 netif_stop_queue(net);
2271 /* Send Event when bus down detected during data session */
2273 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
2274 net_os_send_hang_message(net);
2276 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2277 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2278 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2281 return NETDEV_TX_BUSY;
2285 ifp = DHD_DEV_IFP(net);
2286 ifidx = DHD_DEV_IFIDX(net);
2288 ASSERT(ifidx == dhd_net2idx(dhd, net));
2289 ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
2291 if (ifidx == DHD_BAD_IF) {
2292 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
2293 netif_stop_queue(net);
2294 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2295 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2296 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2299 return NETDEV_TX_BUSY;
2303 /* re-align socket buffer if "skb->data" is odd address */
2304 if (((unsigned long)(skb->data)) & 0x1) {
2305 unsigned char *data = skb->data;
2306 uint32 length = skb->len;
2307 PKTPUSH(dhd->pub.osh, skb, 1);
2308 memmove(skb->data, data, length);
2309 PKTSETLEN(dhd->pub.osh, skb, length);
2312 datalen = PKTLEN(dhd->pub.osh, skb);
2314 /* Make sure there's enough room for any header */
2316 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
2317 struct sk_buff *skb2;
2319 DHD_INFO(("%s: insufficient headroom\n",
2320 dhd_ifname(&dhd->pub, ifidx)));
2321 dhd->pub.tx_realloc++;
2323 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
2326 if ((skb = skb2) == NULL) {
2327 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2328 dhd_ifname(&dhd->pub, ifidx)));
2334 /* Convert to packet */
2335 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
2336 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2337 dhd_ifname(&dhd->pub, ifidx)));
2338 dev_kfree_skb_any(skb);
/* HTSF timestamping: retag unicast IP frames so delay stats can be
 * collected (only when htsfdlystat_sz is configured). */
2343 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
2344 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
2345 struct ether_header *eh = (struct ether_header *)pktdata;
2347 if (!ETHER_ISMULTI(eh->ether_dhost) &&
2348 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
2349 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
2354 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
2355 iph = (uint8 *)eh + ETHER_HDR_LEN;
2357 /* WMF processing for multicast packets
2358 * Only IPv4 packets are handled
2360 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
2361 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
2362 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
2363 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2365 bool ucast_convert = FALSE;
2366 #ifdef DHD_UCAST_UPNP
2369 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
2370 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
2371 #endif /* DHD_UCAST_UPNP */
2372 #ifdef DHD_IGMP_UCQUERY
2373 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
2374 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
2375 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
2376 #endif /* DHD_IGMP_UCQUERY */
2377 if (ucast_convert) {
2379 unsigned long flags;
2381 DHD_IF_STA_LIST_LOCK(ifp, flags);
2383 /* Convert upnp/igmp query to unicast for each assoc STA */
2384 list_for_each_entry(sta, &ifp->sta_list, list) {
2385 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
2386 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2387 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2388 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2391 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
2394 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2395 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2396 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Original frame was cloned per-STA; free it and report success. */
2398 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2399 return NETDEV_TX_OK;
2401 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2403 /* There will be no STA info if the packet is coming from LAN host
2406 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
2410 /* Either taken by WMF or we should drop it.
2413 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2414 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2415 return NETDEV_TX_OK;
2417 /* Continue the transmit path */
2422 #endif /* DHD_WMF */
/* Hand off to the common send path; dhd_sendpkt owns pktbuf now. */
2424 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
/* Update per-interface and global tx counters by outcome. */
2428 ifp->stats.tx_dropped++;
2429 dhd->pub.tx_dropped++;
2432 dhd->pub.tx_packets++;
2433 ifp->stats.tx_packets++;
2434 ifp->stats.tx_bytes += datalen;
2437 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2438 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2440 /* Return ok: we always eat the packet */
2441 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2444 return NETDEV_TX_OK;
/*
 * dhd_txflowcontrol: apply bus-level transmit flow control to the
 * kernel queues. state==ON stops the netdev queue(s), otherwise wakes
 * them; ifidx==ALL_INTERFACES applies to every populated iflist slot
 * and records the global dhdp->txoff state.
 */
2450 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
2452 struct net_device *net;
2453 dhd_info_t *dhd = dhdp->info;
2456 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2460 if (ifidx == ALL_INTERFACES) {
2461 /* Flow control on all active interfaces */
2462 dhdp->txoff = state;
2463 for (i = 0; i < DHD_MAX_IFS; i++) {
2464 if (dhd->iflist[i]) {
2465 net = dhd->iflist[i]->net;
2467 netif_stop_queue(net);
2469 netif_wake_queue(net);
/* Single-interface variant of the same stop/wake decision. */
2474 if (dhd->iflist[ifidx]) {
2475 net = dhd->iflist[ifidx]->net;
2477 netif_stop_queue(net);
2479 netif_wake_queue(net);
/* Ethertype -> printable-name table used by _get_packet_type_str()
 * for DHD_RX_DUMP logging. NOTE(review): the terminating
 * catch-all entry and closing brace are not visible in this extract;
 * _get_packet_type_str() returns the last entry for unknown types. */
2490 static const PKTTYPE_INFO packet_type_info[] =
2492 { ETHER_TYPE_IP, "IP" },
2493 { ETHER_TYPE_ARP, "ARP" },
2494 { ETHER_TYPE_BRCM, "BRCM" },
2495 { ETHER_TYPE_802_1X, "802.1X" },
2496 { ETHER_TYPE_WAI, "WAPI" },
2500 static const char *_get_packet_type_str(uint16 type)
2503 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2505 for (i = 0; i < n; i++) {
2506 if (packet_type_info[i].type == type)
2507 return packet_type_info[i].str;
2510 return packet_type_info[n].str;
2512 #endif /* DHD_RX_DUMP */
/* dhd_is_rxthread_enabled: report whether the dedicated rx-frame
 * thread is active for this instance (set elsewhere in the driver). */
2517 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
2519 dhd_info_t *dhd = dhdp->info;
2521 return dhd->rxthread_enabled;
2523 #endif /* DHD_WMF */
/*
 * Main receive path: walk a chain of `numpkt` packets from the bus layer,
 * filter/duplicate as configured, convert each to a native sk_buff and hand
 * it to the network stack (directly, via netif_rx, or via the RX thread).
 * BRCM event packets (ETHER_TYPE_BRCM) are decoded with dhd_wl_host_event()
 * instead of being delivered as network traffic.
 * Many statements in this excerpt are elided; comments below describe only
 * what the visible lines establish.
 */
2526 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
2528 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2529 struct sk_buff *skb;
2532 void *data, *pnext = NULL;
2535 wl_event_msg_t event;
2538 void *skbhead = NULL;
2539 void *skbprev = NULL;
2540 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2543 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2545 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2547 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
2548 struct ether_header *eh;
2550 struct dot11_llc_snap_header *lsh;
/* Detach this packet from the chain before processing it. */
2553 pnext = PKTNEXT(dhdp->osh, pktbuf);
2554 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
2556 ifp = dhd->iflist[ifidx];
2558 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
2560 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2564 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2566 /* Dropping only data packets before registering net device to avoid kernel panic */
2567 #ifndef PROP_TXSTATUS_VSDB
2568 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
2569 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2571 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
2572 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
2573 #endif /* PROP_TXSTATUS_VSDB */
2575 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
2577 PKTCFREE(dhdp->osh, pktbuf, FALSE);
/* BT-AMP (WLBTAMP): detect an RFC1042/SNAP encapsulated L2CAP frame. */
2582 lsh = (struct dot11_llc_snap_header *)&eh[1];
2584 if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
2585 (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
2586 bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
2587 lsh->type == HTON16(BTA_PROT_L2CAP)) {
2588 amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
2589 ((uint8 *)eh + RFC1042_HDR_LEN);
2592 #endif /* WLBTAMP */
2594 #ifdef PROP_TXSTATUS
2595 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
2596 /* WLFC may send header only packet when
2597 there is an urgent message but no packet to
2600 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2604 #ifdef DHD_L2_FILTER
2605 /* If block_ping is enabled drop the ping packet */
2606 if (dhdp->block_ping) {
2607 if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
2608 PKTFREE(dhdp->osh, pktbuf, FALSE);
2614 /* WMF processing for multicast packets */
2615 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
2619 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
2620 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
2623 /* The packet is taken by WMF. Continue to next iteration */
2626 /* Packet DROP decision by WMF. Toss it */
2627 DHD_ERROR(("%s: WMF decides to drop packet\n",
2629 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2632 /* Continue the transmit path */
2636 #endif /* DHD_WMF */
2637 #ifdef DHDTCPACK_SUPPRESS
2638 dhd_tcpdata_info_get(dhdp, pktbuf);
2640 skb = PKTTONATIVE(dhdp->osh, pktbuf);
2642 ifp = dhd->iflist[ifidx];
2644 ifp = dhd->iflist[0];
2647 skb->dev = ifp->net;
/* AP/P2P-GO intra-BSS forwarding: unicast to a known STA is sent back out
 * the radio; otherwise (visible else-branch) a duplicate is forwarded. */
2649 #ifdef PCIE_FULL_DONGLE
2650 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
2651 (!ifp->ap_isolate)) {
2652 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2653 if (ETHER_ISUCAST(eh->ether_dhost)) {
2654 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
2655 dhd_sendpkt(dhdp, ifidx, pktbuf);
/* NOTE(review): PKTDUP() result is passed to dhd_sendpkt() without a NULL
 * check in the visible lines -- verify the elided lines guard allocation
 * failure. */
2659 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
2660 dhd_sendpkt(dhdp, ifidx, npktbuf);
2663 #endif /* PCIE_FULL_DONGLE */
2665 /* Get the protocol, maintain skb around eth_type_trans()
2666 * The main reason for this hack is for the limitation of
2667 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
2668 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
2669 * coping of the packet coming from the network stack to add
2670 * BDC, Hardware header etc, during network interface registration
2671 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
2672 * for BDC, Hardware header etc. and not just the ETH_HLEN
2677 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2678 dump_data = skb->data;
2679 protocol = (dump_data[12] << 8) | dump_data[13];
2681 if (protocol == ETHER_TYPE_802_1X) {
2682 DHD_ERROR(("ETHER_TYPE_802_1X: "
2683 "ver %d, type %d, replay %d\n",
2684 dump_data[14], dump_data[15],
2687 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2688 #if defined(DHD_RX_DUMP)
2689 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
2690 if (protocol != ETHER_TYPE_BRCM) {
2691 if (dump_data[0] == 0xFF) {
2692 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
2694 if ((dump_data[12] == 8) &&
2695 (dump_data[13] == 6)) {
2696 DHD_ERROR(("%s: ARP %d\n",
2697 __FUNCTION__, dump_data[0x15]));
2699 } else if (dump_data[0] & 1) {
2700 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
2701 __FUNCTION__, MAC2STRDBG(dump_data)));
2703 #ifdef DHD_RX_FULL_DUMP
2706 for (k = 0; k < skb->len; k++) {
2707 DHD_ERROR(("%02X ", dump_data[k]));
2713 #endif /* DHD_RX_FULL_DUMP */
2715 #endif /* DHD_RX_DUMP */
2717 skb->protocol = eth_type_trans(skb, skb->dev);
2719 if (skb->pkt_type == PACKET_MULTICAST) {
2720 dhd->pub.rx_multicast++;
2721 ifp->stats.multicast++;
2728 dhd_htsf_addrxts(dhdp, pktbuf);
2730 /* Strip header, count, deliver upward */
2731 skb_pull(skb, ETH_HLEN);
2733 /* Process special event packets and then discard them */
2734 memset(&event, 0, sizeof(event));
2735 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
2736 dhd_wl_host_event(dhd, &ifidx,
2737 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2738 skb_mac_header(skb),
2741 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
2745 wl_event_to_host_order(&event);
2747 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
2749 if (event.event_type == WLC_E_BTA_HCI_EVENT) {
2750 dhd_bta_doevt(dhdp, data, event.datalen);
2752 #endif /* WLBTAMP */
2754 #if defined(PNO_SUPPORT)
2755 if (event.event_type == WLC_E_PFN_NET_FOUND) {
2756 /* enforce custom wake lock to guarantee that the kernel is not suspended */
2757 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
2759 #endif /* PNO_SUPPORT */
2761 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
2762 PKTFREE(dhdp->osh, pktbuf, FALSE);
2764 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
2766 tout_rx = DHD_PACKET_TIMEOUT_MS;
2768 #ifdef PROP_TXSTATUS
2769 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
2770 #endif /* PROP_TXSTATUS */
/* ifidx may have been rewritten by dhd_wl_host_event(); re-resolve ifp. */
2773 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
2774 ifp = dhd->iflist[ifidx];
2777 ifp->net->last_rx = jiffies;
2779 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
2780 dhdp->dstats.rx_bytes += skb->len;
2781 dhdp->rx_packets++; /* Local count */
2782 ifp->stats.rx_bytes += skb->len;
2783 ifp->stats.rx_packets++;
2785 #if defined(DHD_TCP_WINSIZE_ADJUST)
2786 if (dhd_use_tcp_window_size_adjust) {
2787 if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
2788 dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
2791 #endif /* DHD_TCP_WINSIZE_ADJUST */
/* Delivery: in IRQ context, or with the RX thread enabled, skbs are
 * chained (skbhead/skbprev) for batch handoff; otherwise delivered inline. */
2793 if (in_interrupt()) {
2796 if (dhd->rxthread_enabled) {
2800 PKTSETNEXT(dhdp->osh, skbprev, skb);
2804 /* If the receive is not processed inside an ISR,
2805 * the softirqd must be woken explicitly to service
2806 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
2807 * by netif_rx_ni(), but in earlier kernels, we need
2808 * to do it manually.
2810 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2815 local_irq_save(flags);
2817 local_irq_restore(flags);
2818 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
/* Flush any batched skbs to the RX thread, then arm wake-lock timeouts. */
2823 if (dhd->rxthread_enabled && skbhead)
2824 dhd_sched_rxf(dhdp, skbhead);
2826 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
2827 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
/* Per-OS event hook; intentionally a no-op on Linux. */
2831 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
2833 /* Linux version has nothing to do */
/*
 * TX-completion hook: strip the protocol header from the finished packet,
 * decrement the pending-802.1X counter for EAPOL frames, and (WLBTAMP)
 * synthesize a local HCI ACL completion event for BT-AMP data.
 */
2838 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
2840 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2841 struct ether_header *eh;
2847 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
2849 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
2850 type = ntoh16(eh->ether_type);
/* Completed EAPOL frame: balance the counter taken in the TX path. */
2852 if (type == ETHER_TYPE_802_1X)
2853 atomic_dec(&dhd->pend_8021x_cnt);
2856 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
2857 * If yes generate packet completion event.
2859 len = PKTLEN(dhdp->osh, txp);
2861 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
2862 if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
2863 struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
2865 if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
2866 ntoh16(lsh->type) == BTA_PROT_L2CAP) {
2868 dhd_bta_tx_hcidata_complete(dhdp, txp, success);
2871 #endif /* WLBTAMP */
/*
 * net_device_ops .ndo_get_stats: return interface statistics. On a bad
 * interface index the visible code zeroes net->stats; otherwise it kicks
 * dhd_prot_dstats() to refresh dongle-side counters (the actual return of
 * the stats pointer falls in elided lines).
 */
2874 static struct net_device_stats *
2875 dhd_get_stats(struct net_device *net)
2877 dhd_info_t *dhd = DHD_DEV_INFO(net);
2881 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2883 ifidx = dhd_net2idx(dhd, net);
2884 if (ifidx == DHD_BAD_IF) {
2885 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
2887 memset(&net->stats, 0, sizeof(net->stats));
2891 ifp = dhd->iflist[ifidx];
2895 /* Use the protocol to get dongle stats */
2896 dhd_prot_dstats(&dhd->pub);
/*
 * Dedicated watchdog kthread: optionally promoted to SCHED_FIFO, it sleeps
 * on tsk->sema, runs dhd_bus_watchdog() per wakeup, and re-arms the timer,
 * compensating the next period for the time this pass consumed. Exits via
 * complete_and_exit() when tsk->terminated is set.
 */
2902 dhd_watchdog_thread(void *data)
2904 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
2905 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
2906 /* This thread doesn't need any user-level access,
2907 * so get rid of all our resources
2909 if (dhd_watchdog_prio > 0) {
2910 struct sched_param param;
/* Clamp requested priority into the valid realtime range. */
2911 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
2912 dhd_watchdog_prio:(MAX_RT_PRIO-1);
2913 setScheduler(current, SCHED_FIFO, &param);
2917 if (down_interruptible (&tsk->sema) == 0) {
2918 unsigned long flags;
2919 unsigned long jiffies_at_start = jiffies;
2920 unsigned long time_lapse;
2922 SMP_RD_BARRIER_DEPENDS();
2923 if (tsk->terminated) {
2927 if (dhd->pub.dongle_reset == FALSE) {
2928 DHD_TIMER(("%s:\n", __FUNCTION__));
2930 /* Call the bus module watchdog */
2931 dhd_bus_watchdog(&dhd->pub);
2934 DHD_GENERAL_LOCK(&dhd->pub, flags);
2935 /* Count the tick for reference */
2937 time_lapse = jiffies - jiffies_at_start;
2939 /* Reschedule the watchdog; subtract elapsed time so the
 * period stays close to dhd_watchdog_ms end to end. */
2940 if (dhd->wd_timer_valid)
2941 mod_timer(&dhd->timer,
2943 msecs_to_jiffies(dhd_watchdog_ms) -
2944 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
2945 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2951 complete_and_exit(&tsk->completed, 0);
/*
 * Timer callback for the driver watchdog. If the watchdog kthread exists
 * (thr_pid >= 0) it is merely signalled; otherwise the bus watchdog runs
 * inline here and the timer is re-armed directly. No-op while the dongle
 * is in reset.
 */
2954 static void dhd_watchdog(ulong data)
2956 dhd_info_t *dhd = (dhd_info_t *)data;
2957 unsigned long flags;
2959 if (dhd->pub.dongle_reset) {
/* Thread mode: defer the real work to dhd_watchdog_thread(). */
2963 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
2964 up(&dhd->thr_wdt_ctl.sema);
2968 /* Call the bus module watchdog */
2969 dhd_bus_watchdog(&dhd->pub);
2971 DHD_GENERAL_LOCK(&dhd->pub, flags);
2972 /* Count the tick for reference */
2975 /* Reschedule the watchdog */
2976 if (dhd->wd_timer_valid)
2977 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
2978 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2982 #ifdef ENABLE_ADAPTIVE_SCHED
/*
 * Adaptive scheduling: when the CPU is clocked at or below
 * CUSTOM_CPUFREQ_THRESH, drop the calling thread to SCHED_NORMAL;
 * otherwise (re)promote it to SCHED_FIFO at the clamped priority `prio`
 * if it is not already FIFO.
 */
2984 dhd_sched_policy(int prio)
2986 struct sched_param param;
2987 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
2988 param.sched_priority = 0;
2989 setScheduler(current, SCHED_NORMAL, &param);
2991 if (get_scheduler_policy(current) != SCHED_FIFO) {
2992 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
2993 setScheduler(current, SCHED_FIFO, &param);
2997 #endif /* ENABLE_ADAPTIVE_SCHED */
2998 #ifdef DEBUG_CPU_FREQ
/*
 * cpufreq transition notifier (DEBUG_CPU_FREQ): on POSTCHANGE, log the new
 * frequency and record it in the per-CPU dhd->new_freq slot for debugging.
 */
2999 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3001 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3002 struct cpufreq_freqs *freq = data;
3006 if (val == CPUFREQ_POSTCHANGE) {
3007 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3008 freq->new, freq->cpu));
3009 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3015 #endif /* DEBUG_CPU_FREQ */
/*
 * Deferred-procedure-call kthread: optionally SCHED_FIFO and pinned to
 * CUSTOM_DPC_CPUCORE, it waits on a binary semaphore and drains
 * dhd_bus_dpc() while the bus is up (extending the watchdog around the
 * drain). If the bus is down it performs a clean dhd_bus_stop(). The
 * matching wake-lock taken by the scheduler is released on each pass.
 */
3017 dhd_dpc_thread(void *data)
3019 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3020 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3022 /* This thread doesn't need any user-level access,
3023 * so get rid of all our resources
3025 if (dhd_dpc_prio > 0)
3027 struct sched_param param;
3028 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
3029 setScheduler(current, SCHED_FIFO, &param);
3032 #ifdef CUSTOM_DPC_CPUCORE
3033 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
3035 #ifdef CUSTOM_SET_CPUCORE
3036 dhd->pub.current_dpc = current;
3037 #endif /* CUSTOM_SET_CPUCORE */
3039 /* Run until signal received */
3041 if (!binary_sema_down(tsk)) {
3042 #ifdef ENABLE_ADAPTIVE_SCHED
3043 dhd_sched_policy(dhd_dpc_prio);
3044 #endif /* ENABLE_ADAPTIVE_SCHED */
3045 SMP_RD_BARRIER_DEPENDS();
3046 if (tsk->terminated) {
3050 /* Call bus dpc unless it indicated down (then clean stop) */
3051 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3052 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
3053 while (dhd_bus_dpc(dhd->pub.bus)) {
3054 /* process all data */
3056 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
3057 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3061 dhd_bus_stop(dhd->pub.bus, TRUE);
3062 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3069 complete_and_exit(&tsk->completed, 0);
/*
 * RX dispatch kthread: dequeues skb chains staged by dhd_sched_rxf(),
 * unlinks each skb and pushes it into the network stack (the netif_rx
 * call itself is in elided lines, bracketed by the visible IRQ-flag
 * save/restore on pre-2.6 kernels). Releases the scheduler's wake lock
 * after draining; with WAIT_DEQUEUE it also yields periodically.
 */
3073 dhd_rxf_thread(void *data)
3075 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3076 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3077 #if defined(WAIT_DEQUEUE)
3078 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
3079 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
3081 dhd_pub_t *pub = &dhd->pub;
3083 /* This thread doesn't need any user-level access,
3084 * so get rid of all our resources
3086 if (dhd_rxf_prio > 0)
3088 struct sched_param param;
3089 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
3090 setScheduler(current, SCHED_FIFO, &param);
3093 DAEMONIZE("dhd_rxf");
3094 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
3096 /* signal: thread has started */
3097 complete(&tsk->completed);
3098 #ifdef CUSTOM_SET_CPUCORE
3099 dhd->pub.current_rxf = current;
3100 #endif /* CUSTOM_SET_CPUCORE */
3102 /* Run until signal received */
3104 if (down_interruptible(&tsk->sema) == 0) {
3106 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
3109 #ifdef ENABLE_ADAPTIVE_SCHED
3110 dhd_sched_policy(dhd_rxf_prio);
3111 #endif /* ENABLE_ADAPTIVE_SCHED */
3113 SMP_RD_BARRIER_DEPENDS();
3115 if (tsk->terminated) {
3118 skb = dhd_rxf_dequeue(pub);
/* Walk the dequeued chain one skb at a time. */
3124 void *skbnext = PKTNEXT(pub->osh, skb);
3125 PKTSETNEXT(pub->osh, skb, NULL);
3127 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3131 local_irq_save(flags);
3133 local_irq_restore(flags);
3138 #if defined(WAIT_DEQUEUE)
3139 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
3141 watchdogTime = OSL_SYSUPTIME();
3145 DHD_OS_WAKE_UNLOCK(pub);
3151 complete_and_exit(&tsk->completed, 0);
/*
 * Stop DPC processing (non-PCIe builds): kill the DPC tasklet so no
 * further deferred work runs. NOTE(review): lines 3156-3166 are elided
 * here -- they presumably validate dhdp/dhd and handle the kthread case;
 * confirm against the full source.
 */
3155 void dhd_dpc_kill(dhd_pub_t *dhdp)
3167 tasklet_kill(&dhd->tasklet);
3168 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
3170 #endif /* BCMPCIE */
/*
 * Tasklet body for the DPC (the function header is elided in this excerpt;
 * `data` is the dhd_info_t pointer registered at tasklet init). Runs one
 * dhd_bus_dpc() pass and reschedules itself while more work remains;
 * releases the wake lock when done, or stops the bus if it went down.
 */
3177 dhd = (dhd_info_t *)data;
3179 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3180 * down below , wake lock is set,
3181 * the tasklet is initialized in dhd_attach()
3183 /* Call bus dpc unless it indicated down (then clean stop) */
3184 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3185 if (dhd_bus_dpc(dhd->pub.bus))
3186 tasklet_schedule(&dhd->tasklet);
3188 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3190 dhd_bus_stop(dhd->pub.bus, TRUE);
3191 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * Schedule deferred bus processing. Takes a wake lock, then either signals
 * the DPC kthread (thr_pid >= 0) or schedules the tasklet. The lock is the
 * one released later by the DPC path; it is dropped immediately here only
 * if the binary semaphore was already up (no new work signalled).
 */
3196 dhd_sched_dpc(dhd_pub_t *dhdp)
3198 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3200 DHD_OS_WAKE_LOCK(dhdp);
3201 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
3202 /* If the semaphore does not get up,
3203 * wake unlock should be done here
3205 if (!binary_sema_up(&dhd->thr_dpc_ctl))
3206 DHD_OS_WAKE_UNLOCK(dhdp);
3209 tasklet_schedule(&dhd->tasklet);
/*
 * Hand an skb (or skb chain) to the RX dispatch thread. With
 * RXF_DEQUEUE_ON_BUSY the enqueue is retried while the queue is full,
 * falling back to direct kernel delivery if it stays busy; otherwise a
 * failed enqueue is not retried. The thr_rxf_ctl semaphore wakes
 * dhd_rxf_thread(). A wake lock is taken here and released by that thread.
 */
3214 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
3216 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3217 #ifdef RXF_DEQUEUE_ON_BUSY
3220 #endif /* RXF_DEQUEUE_ON_BUSY */
3222 DHD_OS_WAKE_LOCK(dhdp);
3224 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
3225 #ifdef RXF_DEQUEUE_ON_BUSY
3227 ret = dhd_rxf_enqueue(dhdp, skb);
3228 if (ret == BCME_OK || ret == BCME_ERROR)
3231 OSL_SLEEP(50); /* waiting for dequeueing */
3232 } while (retry-- > 0);
3234 if (retry <= 0 && ret == BCME_BUSY) {
/* Queue stayed busy: deliver each skb straight to the kernel instead. */
3238 void *skbnext = PKTNEXT(dhdp->osh, skbp);
3239 PKTSETNEXT(dhdp->osh, skbp, NULL);
3243 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
3246 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3247 up(&dhd->thr_rxf_ctl.sema);
3250 #else /* RXF_DEQUEUE_ON_BUSY */
3252 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
3255 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3256 up(&dhd->thr_rxf_ctl.sema);
3259 #endif /* RXF_DEQUEUE_ON_BUSY */
3263 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
/*
 * Query the dongle's "toe_ol" iovar (TCP offload-engine component bitmap)
 * via WLC_GET_VAR and copy the resulting uint32 into *toe_ol. An ioctl
 * failure is treated as "toe unsupported" on older firmware.
 */
3265 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
3271 memset(&ioc, 0, sizeof(ioc));
3273 ioc.cmd = WLC_GET_VAR;
3275 ioc.len = (uint)sizeof(buf);
3278 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3279 buf[sizeof(buf) - 1] = '\0';
3280 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3281 /* Check for older dongle image that doesn't support toe_ol */
3283 DHD_ERROR(("%s: toe not supported by device\n",
3284 dhd_ifname(&dhd->pub, ifidx)));
3288 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3292 memcpy(toe_ol, buf, sizeof(uint32));
3296 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
/*
 * Write the "toe_ol" component bitmap to the dongle, then set the global
 * "toe" enable iovar to 1 iff any component bit is set. Both writes go
 * through WLC_SET_VAR with the iovar name and value packed into `buf`.
 */
3298 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
3304 memset(&ioc, 0, sizeof(ioc));
3306 ioc.cmd = WLC_SET_VAR;
3308 ioc.len = (uint)sizeof(buf);
3311 /* Set toe_ol as requested */
3313 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3314 buf[sizeof(buf) - 1] = '\0';
3315 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
3317 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3318 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3319 dhd_ifname(&dhd->pub, ifidx), ret));
3323 /* Enable toe globally only if any components are enabled. */
3325 toe = (toe_ol != 0);
3328 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
3330 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3331 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3339 #if defined(WL_CFG80211)
/*
 * In HOSTAP mode, raise the firmware's per-STA probe count ("scb_probe"
 * iovar) to NUM_SCB_MAX_PROBE: read the current wl_scb_probe_t, patch
 * scb_max_probe, and write it back. Failures are logged but not fatal.
 */
3340 void dhd_set_scb_probe(dhd_pub_t *dhd)
3342 #define NUM_SCB_MAX_PROBE 3
3344 wl_scb_probe_t scb_probe;
3345 char iovbuf[WL_EVENTING_MASK_LEN + 12];
3347 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
/* Only meaningful for the AP/hostap operating mode. */
3349 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
3352 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
3354 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3355 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
3357 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
3359 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
3361 bcm_mkiovar("scb_probe", (char *)&scb_probe,
3362 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
3363 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3364 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
3365 #undef NUM_SCB_MAX_PROBE
3368 #endif /* WL_CFG80211 */
3370 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
 * ethtool .get_drvinfo callback (kernels >= 2.6.24): report driver name
 * "wl" and the numeric driver version; registered via dhd_ethtool_ops.
 */
3372 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
3374 dhd_info_t *dhd = DHD_DEV_INFO(net);
3376 snprintf(info->driver, sizeof(info->driver), "wl");
3377 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
3380 struct ethtool_ops dhd_ethtool_ops = {
3381 .get_drvinfo = dhd_ethtool_get_drvinfo
3383 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
3386 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Legacy SIOCETHTOOL handler (pre-ethtool_ops kernels): copies the command
 * word from user space and services GDRVINFO (driver identification),
 * GRX/GTXCSUM (report TOE checksum offload state) and SRX/STXCSUM (update
 * the TOE bitmap via dhd_toe_get/dhd_toe_set, mirroring TX checksum into
 * net->features). All user-space transfers are bounds-checked copies.
 */
3388 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
3390 struct ethtool_drvinfo info;
3391 char drvname[sizeof(info.driver)];
3394 struct ethtool_value edata;
3395 uint32 toe_cmpnt, csum_dir;
3399 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3401 /* all ethtool calls start with a cmd word */
3402 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
3406 case ETHTOOL_GDRVINFO:
3407 /* Copy out any request driver name */
3408 if (copy_from_user(&info, uaddr, sizeof(info)))
3410 strncpy(drvname, info.driver, sizeof(info.driver));
3411 drvname[sizeof(info.driver)-1] = '\0';
3413 /* clear struct for return */
3414 memset(&info, 0, sizeof(info));
3417 /* if dhd requested, identify ourselves */
3418 if (strcmp(drvname, "?dhd") == 0) {
3419 snprintf(info.driver, sizeof(info.driver), "dhd");
3420 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
3421 info.version[sizeof(info.version) - 1] = '\0';
3424 /* otherwise, require dongle to be up */
3425 else if (!dhd->pub.up) {
3426 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
3430 /* finally, report dongle driver type */
3431 else if (dhd->pub.iswl)
3432 snprintf(info.driver, sizeof(info.driver), "wl");
3434 snprintf(info.driver, sizeof(info.driver), "xx");
3436 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
3437 if (copy_to_user(uaddr, &info, sizeof(info)))
3439 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
3440 (int)sizeof(drvname), drvname, info.driver));
3444 /* Get toe offload components from dongle */
3445 case ETHTOOL_GRXCSUM:
3446 case ETHTOOL_GTXCSUM:
3447 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3450 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3453 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
3455 if (copy_to_user(uaddr, &edata, sizeof(edata)))
3459 /* Set toe offload components in dongle */
3460 case ETHTOOL_SRXCSUM:
3461 case ETHTOOL_STXCSUM:
3462 if (copy_from_user(&edata, uaddr, sizeof(edata)))
3465 /* Read the current settings, update and write back */
3466 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3469 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3471 if (edata.data != 0)
3472 toe_cmpnt |= csum_dir;
3474 toe_cmpnt &= ~csum_dir;
3476 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
3479 /* If setting TX checksum mode, tell Linux the new mode */
3480 if (cmd == ETHTOOL_STXCSUM) {
3482 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
3484 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
3496 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/*
 * Decide whether an ioctl error indicates a hung dongle and, if so, push a
 * HANG event to user space via net_os_send_hang_message(). Triggers on
 * -ETIMEDOUT / -EREMOTEIO, or on an unexpected bus-down (busstate ==
 * DHD_BUS_DOWN without an intentional dongle_reset). Skipped while the DPC
 * thread is gone (driver unloading).
 */
3498 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
3503 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3510 dhd = (dhd_info_t *)dhdp->info;
3511 #if !defined(BCMPCIE)
3512 if (dhd->thr_dpc_ctl.thr_pid < 0) {
3513 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
3518 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
3519 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
3520 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
3521 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
3522 net_os_send_hang_message(net);
/*
 * Dispatch a decoded wl/dhd ioctl. DHD_IOCTL_MAGIC requests are handled
 * locally by dhd_ioctl(); everything else requires the bus in DATA state
 * and goes to the dongle via dhd_wl_ioctl(). Key/disassoc commands first
 * drain pending 802.1X frames so M4 and WPS-DONE ordering is preserved.
 * WLMEDIA_HTSF adds several local short-cut commands. The final error is
 * fed to dhd_check_hang() to detect a dead dongle.
 */
3528 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
3530 int bcmerror = BCME_OK;
3532 struct net_device *net;
3534 net = dhd_idx2net(pub, ifidx);
3536 bcmerror = BCME_BADARG;
3541 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
3543 /* check for local dhd ioctl and handle it */
3544 if (ioc->driver == DHD_IOCTL_MAGIC) {
3545 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
3547 pub->bcmerror = bcmerror;
3551 /* send to dongle (must be up, and wl). */
3552 if (pub->busstate != DHD_BUS_DATA) {
3553 bcmerror = BCME_DONGLE_DOWN;
3558 bcmerror = BCME_DONGLE_DOWN;
3563 * Flush the TX queue if required for proper message serialization:
3564 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
3565 * prevent M4 encryption and
3566 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
3567 * prevent disassoc frame being sent before WPS-DONE frame.
3569 if (ioc->cmd == WLC_SET_KEY ||
3570 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3571 strncmp("wsec_key", data_buf, 9) == 0) ||
3572 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
3573 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
3574 ioc->cmd == WLC_DISASSOC)
3575 dhd_wait_pend8021x(net);
3579 /* short cut wl ioctl calls here (WLMEDIA_HTSF debug hooks) */
3580 if (strcmp("htsf", data_buf) == 0) {
3581 dhd_ioctl_htsf_get(dhd, 0);
3585 if (strcmp("htsflate", data_buf) == 0) {
3587 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
3588 memset(&maxdelayts, 0, sizeof(tstamp_t));
3592 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3593 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3594 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3595 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3601 if (strcmp("htsfclear", data_buf) == 0) {
3602 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
3603 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
3604 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
3605 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
3609 if (strcmp("htsfhis", data_buf) == 0) {
3610 dhd_dump_htsfhisto(&vi_d1, "H to D");
3611 dhd_dump_htsfhisto(&vi_d2, "D to D");
3612 dhd_dump_htsfhisto(&vi_d3, "D to H");
3613 dhd_dump_htsfhisto(&vi_d4, "H to H");
3616 if (strcmp("tsport", data_buf) == 0) {
3618 memcpy(&tsport, data_buf + 7, 4);
3620 DHD_ERROR(("current timestamp port: %d \n", tsport));
3625 #endif /* WLMEDIA_HTSF */
3627 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
3628 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
3630 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
3632 bcmerror = BCME_UNSUPPORTED;
3636 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
3639 dhd_check_hang(net, pub, bcmerror);
/*
 * net_device_ops .ndo_do_ioctl entry point. Validates driver/interface
 * state, routes wireless-extension and SIOCETHTOOL requests to their
 * handlers, and for SIOCDEVPRIVATE decodes a wl_ioctl_t (with a 32-bit
 * compat path), copies the user payload into a kernel buffer, runs
 * dhd_ioctl_process(), and copies the result back. Wake lock and perim
 * lock are held across the call; the perim lock is dropped around
 * copy_{from,to}_user since those may sleep.
 */
3645 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
3647 dhd_info_t *dhd = DHD_DEV_INFO(net);
3652 void *local_buf = NULL;
3655 DHD_OS_WAKE_LOCK(&dhd->pub);
3656 DHD_PERIM_LOCK(&dhd->pub);
3658 /* Interface up check for built-in type */
3659 if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
3660 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
3661 DHD_PERIM_UNLOCK(&dhd->pub);
3662 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3666 /* send to dongle only if we are not waiting for reload already */
3667 if (dhd->pub.hang_was_sent) {
3668 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
3669 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
3670 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3671 return OSL_ERROR(BCME_DONGLE_DOWN);
3674 ifidx = dhd_net2idx(dhd, net);
3675 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
3677 if (ifidx == DHD_BAD_IF) {
3678 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
3679 DHD_PERIM_UNLOCK(&dhd->pub);
3680 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3684 #if defined(WL_WIRELESS_EXT)
3685 /* linux wireless extensions */
3686 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
3687 /* may recurse, do NOT lock */
3688 ret = wl_iw_ioctl(net, ifr, cmd);
3689 DHD_PERIM_UNLOCK(&dhd->pub);
3690 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3693 #endif /* defined(WL_WIRELESS_EXT) */
3695 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
3696 if (cmd == SIOCETHTOOL) {
3697 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
3698 DHD_PERIM_UNLOCK(&dhd->pub);
3699 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3702 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/* Android private commands ride on SIOCDEVPRIVATE+1. */
3704 if (cmd == SIOCDEVPRIVATE+1) {
3705 ret = wl_android_priv_cmd(net, ifr, cmd);
3706 dhd_check_hang(net, &dhd->pub, ret);
3707 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3711 if (cmd != SIOCDEVPRIVATE) {
3712 DHD_PERIM_UNLOCK(&dhd->pub);
3713 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3717 memset(&ioc, 0, sizeof(ioc));
3719 #ifdef CONFIG_COMPAT
/* 32-bit user space on a 64-bit kernel: translate the compat layout. */
3720 if (is_compat_task()) {
3721 compat_wl_ioctl_t compat_ioc;
3722 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
3723 bcmerror = BCME_BADADDR;
3726 ioc.cmd = compat_ioc.cmd;
3727 ioc.buf = compat_ptr(compat_ioc.buf);
3728 ioc.len = compat_ioc.len;
3729 ioc.set = compat_ioc.set;
3730 ioc.used = compat_ioc.used;
3731 ioc.needed = compat_ioc.needed;
3732 /* To differentiate between wl and dhd read 4 more bytes */
3733 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
3734 sizeof(uint)) != 0)) {
3735 bcmerror = BCME_BADADDR;
3739 #endif /* CONFIG_COMPAT */
3741 /* Copy the ioc control structure part of ioctl request */
3742 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
3743 bcmerror = BCME_BADADDR;
3747 /* To differentiate between wl and dhd read 4 more bytes */
3748 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
3749 sizeof(uint)) != 0)) {
3750 bcmerror = BCME_BADADDR;
3755 if (!capable(CAP_NET_ADMIN)) {
3756 bcmerror = BCME_EPERM;
3761 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
3762 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
3763 bcmerror = BCME_NOMEM;
3767 DHD_PERIM_UNLOCK(&dhd->pub);
3768 if (copy_from_user(local_buf, ioc.buf, buflen)) {
3769 DHD_PERIM_LOCK(&dhd->pub);
3770 bcmerror = BCME_BADADDR;
3773 DHD_PERIM_LOCK(&dhd->pub);
/* NUL-terminate so string-style iovar payloads are safe to strcmp(). */
3775 *(char *)(local_buf + buflen) = '\0';
3778 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
3780 if (!bcmerror && buflen && local_buf && ioc.buf) {
3781 DHD_PERIM_UNLOCK(&dhd->pub);
3782 if (copy_to_user(ioc.buf, local_buf, buflen))
3784 DHD_PERIM_LOCK(&dhd->pub);
3789 MFREE(dhd->pub.osh, local_buf, buflen+1);
3791 DHD_PERIM_UNLOCK(&dhd->pub);
3792 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3794 return OSL_ERROR(bcmerror);
/*
 * net_device_ops .ndo_stop (ifconfig down): stop the TX queue, tear down
 * cfg80211 state and any virtual interfaces (for non-driverload builds),
 * clean up WLFC, stop the protocol layer, power the chip off via
 * wl_android_wifi_off() on the primary interface, and reset hang/timeout
 * bookkeeping and country settings.
 */
3800 dhd_stop(struct net_device *net)
3803 dhd_info_t *dhd = DHD_DEV_INFO(net);
3804 DHD_OS_WAKE_LOCK(&dhd->pub);
3805 DHD_PERIM_LOCK(&dhd->pub);
3806 printk("%s: Enter %p\n", __FUNCTION__, net);
3807 if (dhd->pub.up == 0) {
3811 dhd_if_flush_sta(DHD_DEV_IFP(net));
3814 ifidx = dhd_net2idx(dhd, net);
3815 BCM_REFERENCE(ifidx);
3817 /* Set state and stop OS transmissions */
3818 netif_stop_queue(net);
3823 wl_cfg80211_down(NULL);
3826 * For CFG80211: Clean up all the left over virtual interfaces
3827 * when the primary Interface is brought down. [ifconfig wlan0 down]
3829 if (!dhd_download_fw_on_driverload) {
3830 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
3831 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
3834 dhd_net_if_lock_local(dhd);
3835 for (i = 1; i < DHD_MAX_IFS; i++)
3836 dhd_remove_if(&dhd->pub, i, FALSE);
3837 dhd_net_if_unlock_local(dhd);
3841 #endif /* WL_CFG80211 */
3843 #ifdef PROP_TXSTATUS
3844 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
3846 /* Stop the protocol module */
3847 dhd_prot_stop(&dhd->pub);
3849 OLD_MOD_DEC_USE_COUNT;
/* Primary interface in Android builds: actually power the WLAN chip off. */
3851 if (ifidx == 0 && !dhd_download_fw_on_driverload)
3852 wl_android_wifi_off(net);
3853 dhd->pub.rxcnt_timeout = 0;
3854 dhd->pub.txcnt_timeout = 0;
3856 dhd->pub.hang_was_sent = 0;
3858 /* Clear country spec for built-in type driver */
3859 if (!dhd_download_fw_on_driverload) {
3860 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
3861 dhd->pub.dhd_cspec.rev = 0;
3862 dhd->pub.dhd_cspec.ccode[0] = 0x00;
3865 printk("%s: Exit\n", __FUNCTION__);
3866 DHD_PERIM_UNLOCK(&dhd->pub);
3867 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3871 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
3872 extern bool g_first_broadcast_scan;
/*
 * Enable 802.11u interworking in the firmware ("interworking" iovar), and
 * on success also set the basic Hotspot 2.0 Release 2 WNM capabilities
 * (BSS transition + notification) via the "wnm" iovar.
 */
3876 static int dhd_interworking_enable(dhd_pub_t *dhd)
3878 char iovbuf[WLC_IOCTL_SMLEN];
3879 uint32 enable = true;
3882 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
3883 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
3884 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret))
3887 if (ret == BCME_OK) {
3888 /* basic capabilities for HS20 REL2 */
3889 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
3890 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
3891 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
3892 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
3893 DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
/* Linux net_device ndo_open handler for the DHD primary interface.
 * Brings up firmware/bus if needed, restores the MAC address, configures
 * TOE checksum offload flags, brings up cfg80211, and enables the TX queue.
 * Returns 0 on success, negative errno-style code on failure.
 * Locking: runs under DHD_OS_WAKE_LOCK + DHD_PERIM_LOCK for its duration;
 * PERIM lock is dropped around dhd_bus_start (which sleeps).
 * NOTE(review): chunk is missing intermediate lines (declarations of ret,
 * ifidx, toe_ol, up; several braces/else branches) — comments cover only
 * the visible statements.
 */
3902 dhd_open(struct net_device *net)
3904 dhd_info_t *dhd = DHD_DEV_INFO(net);
3914 printk("%s: Enter %p\n", __FUNCTION__, net);
3915 #if defined(MULTIPLE_SUPPLICANT)
3916 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
/* Serialize open against SDIO probe still in progress (insmod not complete) */
3917 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
3918 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
3920 mutex_lock(&_dhd_sdio_mutex_lock_);
3922 #endif /* MULTIPLE_SUPPLICANT */
3924 DHD_OS_WAKE_LOCK(&dhd->pub);
3925 DHD_PERIM_LOCK(&dhd->pub);
/* Fresh open: clear stale trap/hang state from a previous session */
3926 dhd->pub.dongle_trap_occured = 0;
3927 dhd->pub.hang_was_sent = 0;
3931 * Force start if ifconfig_up gets called before START command
3932 * We keep WEXT's wl_control_wl_start to provide backward compatibility
3933 * This should be removed in the future
3935 ret = wl_control_wl_start(net);
3937 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
/* Map net_device to DHD interface index; bail on invalid/deleted IF */
3943 ifidx = dhd_net2idx(dhd, net);
3944 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
3947 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
3952 if (!dhd->iflist[ifidx]) {
3953 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
/* Reset pending 802.1X frame counter for the new session */
3959 atomic_set(&dhd->pend_8021x_cnt, 0);
/* Firmware not loaded at insmod time: power on WiFi and download now */
3960 if (!dhd_download_fw_on_driverload) {
3961 DHD_ERROR(("\n%s\n", dhd_version));
3962 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
3963 g_first_broadcast_scan = TRUE;
3965 ret = wl_android_wifi_on(net);
3967 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
3968 __FUNCTION__, ret));
3974 if (dhd->pub.busstate != DHD_BUS_DATA) {
3976 /* try to bring up bus */
/* PERIM lock released around bus start — it blocks while downloading fw */
3977 DHD_PERIM_UNLOCK(&dhd->pub);
3978 ret = dhd_bus_start(&dhd->pub);
3979 DHD_PERIM_LOCK(&dhd->pub);
3981 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
3988 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
3989 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
3992 /* Get current TOE mode from dongle */
/* Reflect dongle TX checksum offload capability in netdev feature flags */
3993 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
3994 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
3996 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
3999 #if defined(WL_CFG80211)
4000 if (unlikely(wl_cfg80211_up(NULL))) {
4001 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
4005 dhd_set_scb_probe(&dhd->pub);
4007 ret = wldev_ioctl(net, WLC_UP, &up, sizeof(up), true);
4008 if (unlikely(ret)) {
4009 DHD_ERROR(("WLC_UP error (%d)\n", ret));
4011 #endif /* WL_CFG80211 */
4014 /* Allow transmit calls */
4015 netif_start_queue(net);
4019 dhd_dbg_init(&dhd->pub);
4022 OLD_MOD_INC_USE_COUNT;
4027 DHD_PERIM_UNLOCK(&dhd->pub);
4028 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4030 #if defined(MULTIPLE_SUPPLICANT)
4031 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
4032 mutex_unlock(&_dhd_sdio_mutex_lock_);
4034 #endif /* MULTIPLE_SUPPLICANT */
4036 printk("%s: Exit ret=%d\n", __FUNCTION__, ret);
/* External entry point used to force driver initialization for `net`.
 * No-op when the bus is already in DATA state; otherwise delegates to
 * dhd_open(). Returns 0 on success (error paths fall in the missing lines).
 * NOTE(review): chunk is missing the net==NULL guard body, returns and
 * closing braces — comments cover only the visible statements.
 */
4040 int dhd_do_driver_init(struct net_device *net)
4042 dhd_info_t *dhd = NULL;
4045 DHD_ERROR(("Primary Interface not initialized \n"));
4049 #ifdef MULTIPLE_SUPPLICANT
4050 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
/* Refuse to init while dhdsdio_probe still holds the SDIO mutex */
4051 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4052 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
4055 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
4056 #endif /* MULTIPLE_SUPPLICANT */
4058 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
4059 dhd = DHD_DEV_INFO(net);
4061 /* If driver is already initialized, do nothing
4063 if (dhd->pub.busstate == DHD_BUS_DATA) {
4064 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
4068 if (dhd_open(net) < 0) {
4069 DHD_ERROR(("Driver Init Failed \n"));
/* Handle a firmware IF_ADD event: notify cfg80211 first; if cfg80211 does not
 * consume it, defer interface creation to the workqueue so the DPC thread is
 * not blocked waiting on netdev registration.
 * NOTE(review): chunk is missing some lines — presumably a NULL check on the
 * MALLOC result before the memcpy at 4093 (line 4092 is absent); confirm
 * against the full source.
 */
4077 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4081 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4085 /* handle IF event caused by wl commands, SoftAP, WEXT and
4086 * anything else. This has to be done asynchronously otherwise
4087 * DPC will be blocked (and iovars will timeout as DPC has no chance
4088 * to read the response back)
/* Only virtual interfaces (ifidx > 0) are created via the deferred path */
4090 if (ifevent->ifidx > 0) {
4091 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4093 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4094 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4095 strncpy(if_event->name, name, IFNAMSIZ);
4096 if_event->name[IFNAMSIZ - 1] = '\0';
/* Low priority: interface add is not latency critical */
4097 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
4098 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
/* Handle a firmware IF_DEL event: notify cfg80211 first; if not consumed,
 * defer the interface deletion to the workqueue (same rationale as ifadd —
 * netdev unregistration must not run in DPC context).
 * NOTE(review): no NULL check is visible between the MALLOC at 4117 and the
 * memcpy at 4118 — an allocation failure would dereference NULL; verify
 * against the full source and fix upstream if truly absent.
 */
4105 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4107 dhd_if_event_t *if_event;
4110 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4112 #endif /* WL_CFG80211 */
4114 /* handle IF event caused by wl commands, SoftAP, WEXT and
4117 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4118 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4119 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4120 strncpy(if_event->name, name, IFNAMSIZ);
4121 if_event->name[IFNAMSIZ - 1] = '\0';
4122 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
4123 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
4128 /* unregister and free the existing net_device interface (if any) in iflist and
4129 * allocate a new one. the slot is reused. this function does NOT register the
4130 * new interface to linux kernel. dhd_register_if does the job
/* Params: ifidx = slot in dhdinfo->iflist; name may carry a "%d" template;
 * mac (optional) seeds ifp->mac_addr; need_rtnl_lock selects
 * unregister_netdev vs unregister_netdevice (caller already holds rtnl).
 * Returns the new net_device on success (return statements are in lines
 * missing from this extract), NULL on allocation failure.
 */
4133 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
4134 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
4136 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4139 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
4140 ifp = dhdinfo->iflist[ifidx];
/* Slot reuse: tear down any previous netdev occupying this index */
4143 if (ifp->net != NULL) {
4144 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
4146 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
4148 /* in unregister_netdev case, the interface gets freed by net->destructor
4149 * (which is set to free_netdev)
/* Never-registered netdevs must be freed directly, not unregistered */
4151 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4152 free_netdev(ifp->net);
4154 netif_stop_queue(ifp->net);
4156 unregister_netdev(ifp->net);
4158 unregister_netdevice(ifp->net);
/* Allocate and zero the per-interface control structure */
4163 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
4165 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
4170 memset(ifp, 0, sizeof(dhd_if_t));
4171 ifp->info = dhdinfo;
4173 ifp->bssidx = bssidx;
4175 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
4177 /* Allocate etherdev, including space for private structure */
4178 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
4179 if (ifp->net == NULL) {
4180 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
4184 /* Setup the dhd interface's netdevice private structure. */
4185 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
4187 if (name && name[0]) {
4188 strncpy(ifp->net->name, name, IFNAMSIZ);
4189 ifp->net->name[IFNAMSIZ - 1] = '\0';
/* Destructor choice depends on build config (WL_CFG80211 et al.) */
4193 ifp->net->destructor = free_netdev;
4195 ifp->net->destructor = dhd_netdev_free;
4197 ifp->net->destructor = free_netdev;
4198 #endif /* WL_CFG80211 */
4199 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
4200 ifp->name[IFNAMSIZ - 1] = '\0';
4201 dhdinfo->iflist[ifidx] = ifp;
4203 #ifdef PCIE_FULL_DONGLE
4204 /* Initialize STA info list */
4205 INIT_LIST_HEAD(&ifp->sta_list);
4206 DHD_IF_STA_LIST_LOCK_INIT(ifp);
4207 #endif /* PCIE_FULL_DONGLE */
/* Error path: undo partial allocation and clear the slot */
4213 if (ifp->net != NULL) {
4214 dhd_dev_priv_clear(ifp->net);
4215 free_netdev(ifp->net);
4218 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4221 dhdinfo->iflist[ifidx] = NULL;
4225 /* unregister and free the the net_device interface associated with the indexed
4226 * slot, also free the slot memory and set the slot pointer to NULL
/* need_rtnl_lock: TRUE => caller does not hold rtnl (use unregister_netdev);
 * FALSE => caller already holds rtnl (use unregister_netdevice).
 * The netdev itself is freed via net->destructor (free_netdev) on the
 * unregister path, directly when it was never registered.
 */
4229 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
4231 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4234 ifp = dhdinfo->iflist[ifidx];
4236 if (ifp->net != NULL) {
4237 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
4239 /* in unregister_netdev case, the interface gets freed by net->destructor
4240 * (which is set to free_netdev)
4242 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4243 free_netdev(ifp->net);
4245 netif_stop_queue(ifp->net);
4250 unregister_netdev(ifp->net);
4252 unregister_netdevice(ifp->net);
/* Release per-interface WMF state and any associated STA objects */
4257 dhd_wmf_cleanup(dhdpub, ifidx);
4258 #endif /* DHD_WMF */
4260 dhd_if_del_sta_list(ifp);
/* Clear the slot before freeing so no stale pointer is observable */
4262 dhdinfo->iflist[ifidx] = NULL;
4263 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4270 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary DHD interface (kernels >= 2.6.31).
 * ndo_set_rx_mode replaced ndo_set_multicast_list in kernel 3.2; the
 * version check selects the matching field name for the same handler.
 */
4271 static struct net_device_ops dhd_ops_pri = {
4272 .ndo_open = dhd_open,
4273 .ndo_stop = dhd_stop,
4274 .ndo_get_stats = dhd_get_stats,
4275 .ndo_do_ioctl = dhd_ioctl_entry,
4276 .ndo_start_xmit = dhd_start_xmit,
4277 .ndo_set_mac_address = dhd_set_mac_address,
4278 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4279 .ndo_set_rx_mode = dhd_set_multicast_list,
4281 .ndo_set_multicast_list = dhd_set_multicast_list,
/* net_device_ops for virtual (secondary) DHD interfaces. Unlike dhd_ops_pri,
 * no ndo_open/ndo_stop here — open/stop handlers for virtual interfaces are
 * assigned elsewhere (outside this extract).
 */
4285 static struct net_device_ops dhd_ops_virt = {
4286 .ndo_get_stats = dhd_get_stats,
4287 .ndo_do_ioctl = dhd_ioctl_entry,
4288 .ndo_start_xmit = dhd_start_xmit,
4289 .ndo_set_mac_address = dhd_set_mac_address,
4290 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4291 .ndo_set_rx_mode = dhd_set_multicast_list,
4293 .ndo_set_multicast_list = dhd_set_multicast_list,
4296 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4299 extern void debugger_init(void *bus_handle);
4303 #ifdef SHOW_LOGTRACE
4304 static char *logstrs_path = "/root/logstrs.bin";
4305 module_param(logstrs_path, charp, S_IRUGO);
/* Load and parse the firmware log-format-strings file (logstrs.bin, path from
 * the `logstrs_path` module parameter) into `temp`: builds `fmts`, an array of
 * pointers into the raw file buffer, one per log format string. Supports three
 * file layouts: (1) new format with trailing logstr_header_t (LOGSTRS_MAGIC),
 * (2) legacy ROM/RAM format (4324b5 only), (3) legacy RAM-only format.
 * Ownership: raw_fmts and fmts are kmalloc'ed and retained in `temp`;
 * presumably freed at detach time — not visible in this extract.
 * NOTE(review): error/cleanup paths are partially missing from this extract
 * (only the filp_close at 4438 is visible).
 */
4308 dhd_init_logstrs_array(dhd_event_log_t *temp)
4310 struct file *filep = NULL;
4313 char *raw_fmts = NULL;
4314 int logstrs_size = 0;
4316 logstr_header_t *hdr = NULL;
4317 uint32 *lognums = NULL;
4318 char *logstrs = NULL;
4326 filep = filp_open(logstrs_path, O_RDONLY, 0);
4327 if (IS_ERR(filep)) {
4328 DHD_ERROR(("Failed to open the file logstrs.bin in %s", __FUNCTION__));
4331 error = vfs_stat(logstrs_path, &stat);
4333 DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
4336 logstrs_size = (int) stat.size;
/* Read the whole file into one kernel buffer */
4338 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
4339 if (raw_fmts == NULL) {
4340 DHD_ERROR(("Failed to allocate raw_fmts memory"));
4343 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
4344 DHD_ERROR(("Error: Log strings file read failed"));
4348 /* Remember header from the logstrs.bin file */
4349 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
4350 sizeof(logstr_header_t));
/* New-format file: header at the end of the file gives section offsets */
4352 if (hdr->log_magic == LOGSTRS_MAGIC) {
4354 * logstrs.bin start with header.
4356 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
4357 ram_index = (hdr->ram_lognums_offset -
4358 hdr->rom_lognums_offset) / sizeof(uint32);
4359 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
4360 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
4363 * Legacy logstrs.bin format without header.
4365 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
4366 if (num_fmts == 0) {
4367 /* Legacy ROM/RAM logstrs.bin format:
4368 * - ROM 'lognums' section
4369 * - RAM 'lognums' section
4370 * - ROM 'logstrs' section.
4371 * - RAM 'logstrs' section.
4373 * 'lognums' is an array of indexes for the strings in the
4374 * 'logstrs' section. The first uint32 is 0 (index of first
4375 * string in ROM 'logstrs' section).
4377 * The 4324b5 is the only ROM that uses this legacy format. Use the
4378 * fixed number of ROM fmtnums to find the start of the RAM
4379 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
4380 * find the ROM 'logstrs' section.
4382 #define NUM_4324B5_ROM_FMTS 186
4383 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
4384 ram_index = NUM_4324B5_ROM_FMTS;
4385 lognums = (uint32 *) raw_fmts;
4386 num_fmts = ram_index;
4387 logstrs = (char *) &raw_fmts[num_fmts << 2];
/* Scan forward (a uint32 at a time) until the known first ROM string */
4388 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
4390 logstrs = (char *) &raw_fmts[num_fmts << 2];
4393 /* Legacy RAM-only logstrs.bin format:
4394 * - RAM 'lognums' section
4395 * - RAM 'logstrs' section.
4397 * 'lognums' is an array of indexes for the strings in the
4398 * 'logstrs' section. The first uint32 is an index to the
4399 * start of 'logstrs'. Therefore, if this index is divided
4400 * by 'sizeof(uint32)' it provides the number of logstr
4404 lognums = (uint32 *) raw_fmts;
4405 logstrs = (char *) &raw_fmts[num_fmts << 2];
/* Build the per-format pointer table into the raw buffer */
4408 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
4410 DHD_ERROR(("Failed to allocate fmts memory"));
4414 for (i = 0; i < num_fmts; i++) {
4415 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
4416 * (they are 0-indexed relative to 'rom_logstrs_offset').
4418 * RAM lognums are already indexed to point to the correct RAM logstrs (they
4419 * are 0-indexed relative to the start of the logstrs.bin file).
4421 if (i == ram_index) {
4424 fmts[i] = &logstrs[lognums[i]];
4427 temp->raw_fmts = raw_fmts;
4428 temp->num_fmts = num_fmts;
4429 filp_close(filep, NULL);
/* Error path: close the file (buffer cleanup lines not in this extract) */
4438 filp_close(filep, NULL);
/* Top-level DHD attach: allocate and initialize the dhd_info_t for one
 * dongle instance, create the primary network interface (not yet registered),
 * attach protocol and cfg80211/wext layers, create watchdog/DPC/RXF threads,
 * and register PM / early-suspend / inet(6)addr notifiers.
 * Returns presumably a pointer to the new instance on success (return lines
 * are missing from this extract); on failure falls through to the teardown
 * path at the bottom (dhd_detach + dhd_free).
 * NOTE(review): many intermediate lines (braces, gotos, returns, some
 * conditionals) are absent from this extract; comments describe only the
 * visible statements.
 */
4447 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
4449 dhd_info_t *dhd = NULL;
4450 struct net_device *net = NULL;
4451 char if_name[IFNAMSIZ] = {'\0'};
4452 uint32 bus_type = -1;
4453 uint32 bus_num = -1;
4454 uint32 slot_num = -1;
4455 wifi_adapter_info_t *adapter = NULL;
4457 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
4458 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4460 /* will implement get_ids for DBUS later */
4461 #if defined(BCMSDIO)
4462 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
4464 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
4466 /* Allocate primary dhd_info */
/* Prefer platform-preallocated memory; fall back to MALLOC */
4467 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
4469 dhd = MALLOC(osh, sizeof(dhd_info_t));
4471 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
4475 memset(dhd, 0, sizeof(dhd_info_t));
4476 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
4478 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
4481 dhd->adapter = adapter;
4483 #ifdef GET_CUSTOM_MAC_ENABLE
4484 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
4485 #endif /* GET_CUSTOM_MAC_ENABLE */
/* Mark helper threads as not-yet-started */
4486 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
4487 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
4489 /* Initialize thread based operation and lock */
4490 sema_init(&dhd->sdsem, 1);
4492 /* Link to info module */
4493 dhd->pub.info = dhd;
4496 /* Link to bus module */
4498 dhd->pub.hdrlen = bus_hdrlen;
4500 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
4501 * because dhd_detech will check .info is NULL or not.
4503 if (dhd_conf_attach(&dhd->pub) != 0) {
4504 DHD_ERROR(("dhd_conf_attach failed\n"));
4507 dhd_conf_reset(&dhd->pub);
4508 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
4510 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
4511 * This is indeed a hack but we have to make it work properly before we have a better
4514 dhd_update_fw_nv_path(dhd);
4516 /* Set network interface name if it was provided as module parameter */
4517 if (iface_name[0]) {
4520 strncpy(if_name, iface_name, IFNAMSIZ);
4521 if_name[IFNAMSIZ - 1] = 0;
4522 len = strlen(if_name);
4523 ch = if_name[len - 1];
/* Append "%d" so the kernel assigns a unit number, unless one was given */
4524 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
4525 strcat(if_name, "%d");
4527 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
4530 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
4532 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4535 net->netdev_ops = NULL;
4538 sema_init(&dhd->proto_sem, 1);
4540 #ifdef PROP_TXSTATUS
4541 spin_lock_init(&dhd->wlfc_spinlock);
4543 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
4544 dhd->pub.plat_init = dhd_wlfc_plat_init;
4545 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
4546 #endif /* PROP_TXSTATUS */
4548 /* Initialize other structure content */
4549 init_waitqueue_head(&dhd->ioctl_resp_wait);
4550 init_waitqueue_head(&dhd->ctrl_wait);
4552 /* Initialize the spinlocks */
4553 spin_lock_init(&dhd->sdlock);
4554 spin_lock_init(&dhd->txqlock);
4555 spin_lock_init(&dhd->dhd_lock);
4556 spin_lock_init(&dhd->rxf_lock);
4557 #if defined(RXFRAME_THREAD)
4558 dhd->rxthread_enabled = TRUE;
4559 #endif /* defined(RXFRAME_THREAD) */
4561 #ifdef DHDTCPACK_SUPPRESS
4562 spin_lock_init(&dhd->tcpack_lock);
4563 #endif /* DHDTCPACK_SUPPRESS */
4565 /* Initialize Wakelock stuff */
4566 spin_lock_init(&dhd->wakelock_spinlock);
4567 dhd->wakelock_counter = 0;
4568 dhd->wakelock_wd_counter = 0;
4569 dhd->wakelock_rx_timeout_enable = 0;
4570 dhd->wakelock_ctrl_timeout_enable = 0;
4571 #ifdef CONFIG_HAS_WAKELOCK
4572 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
4573 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
4574 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
4575 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
4576 #endif /* CONFIG_HAS_WAKELOCK */
4577 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
4578 mutex_init(&dhd->dhd_net_if_mutex);
4579 mutex_init(&dhd->dhd_suspend_mutex);
4581 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
4583 /* Attach and link in the protocol */
4584 if (dhd_prot_attach(&dhd->pub) != 0) {
4585 DHD_ERROR(("dhd_prot_attach failed\n"));
4588 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
4591 /* Attach and link in the cfg80211 */
4592 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
4593 DHD_ERROR(("wl_cfg80211_attach failed\n"));
4597 dhd_monitor_init(&dhd->pub);
4598 dhd_state |= DHD_ATTACH_STATE_CFG80211;
4600 #if defined(WL_WIRELESS_EXT)
4601 /* Attach and link in the iw */
/* WEXT only used when cfg80211 did not attach */
4602 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
4603 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
4604 DHD_ERROR(("wl_iw_attach failed\n"));
4607 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
4609 #endif /* defined(WL_WIRELESS_EXT) */
4611 #ifdef SHOW_LOGTRACE
4612 dhd_init_logstrs_array(&dhd->event_data);
4613 #endif /* SHOW_LOGTRACE */
4615 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
4616 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
4621 /* Set up the watchdog timer */
4622 init_timer(&dhd->timer);
4623 dhd->timer.data = (ulong)dhd;
4624 dhd->timer.function = dhd_watchdog;
4625 dhd->default_wd_interval = dhd_watchdog_ms;
/* Watchdog runs in its own thread when a priority was requested */
4627 if (dhd_watchdog_prio >= 0) {
4628 /* Initialize watchdog thread */
4629 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
4632 dhd->thr_wdt_ctl.thr_pid = -1;
4636 debugger_init((void *) bus);
4639 /* Set up the bottom half handler */
/* DPC: dedicated thread if prioritized, else tasklet */
4640 if (dhd_dpc_prio >= 0) {
4641 /* Initialize DPC thread */
4642 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
4644 /* use tasklet for dpc */
4645 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
4646 dhd->thr_dpc_ctl.thr_pid = -1;
4649 if (dhd->rxthread_enabled) {
4650 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
4651 /* Initialize RXF thread */
4652 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
4655 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
4657 #if defined(CONFIG_PM_SLEEP)
4658 if (!dhd_pm_notifier_registered) {
4659 dhd_pm_notifier_registered = TRUE;
4660 register_pm_notifier(&dhd_pm_notifier);
4662 #endif /* CONFIG_PM_SLEEP */
4664 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
4665 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
4666 dhd->early_suspend.suspend = dhd_early_suspend;
4667 dhd->early_suspend.resume = dhd_late_resume;
4668 register_early_suspend(&dhd->early_suspend);
4669 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
4670 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
4672 #ifdef ARP_OFFLOAD_SUPPORT
4673 dhd->pend_ipaddr = 0;
4674 if (!dhd_inetaddr_notifier_registered) {
4675 dhd_inetaddr_notifier_registered = TRUE;
4676 register_inetaddr_notifier(&dhd_inetaddr_notifier);
4678 #endif /* ARP_OFFLOAD_SUPPORT */
4680 if (!dhd_inet6addr_notifier_registered) {
4681 dhd_inet6addr_notifier_registered = TRUE;
4682 register_inet6addr_notifier(&dhd_inet6addr_notifier);
4685 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
4686 #ifdef DEBUG_CPU_FREQ
4687 dhd->new_freq = alloc_percpu(int);
4688 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
4689 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
4691 #ifdef DHDTCPACK_SUPPRESS
/* TCP ACK suppression mode depends on the bus type */
4693 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
4694 #elif defined(BCMPCIE)
4695 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
4697 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
4698 #endif /* BCMSDIO */
4699 #endif /* DHDTCPACK_SUPPRESS */
4701 dhd_state |= DHD_ATTACH_STATE_DONE;
4702 dhd->dhd_state = dhd_state;
/* Failure teardown: detach whatever attach stages recorded in dhd_state */
4708 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
4709 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
4710 __FUNCTION__, dhd_state, &dhd->pub));
4711 dhd->dhd_state = dhd_state;
4712 dhd_detach(&dhd->pub);
4713 dhd_free(&dhd->pub);
/* Infer the driver operating mode from the firmware file name in
 * dhdinfo->fw_path: "_apsta" => HOSTAP, "_p2p" => P2P, "_ibss" => IBSS,
 * "_mfg" => manufacturing test; anything else defaults to STA mode.
 * First match wins, checked in the order listed above.
 */
4719 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
4721 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
4722 return DHD_FLAG_HOSTAP_MODE;
4723 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
4724 return DHD_FLAG_P2P_MODE;
4725 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
4726 return DHD_FLAG_IBSS_MODE;
4727 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
4728 return DHD_FLAG_MFG_MODE;
4730 return DHD_FLAG_STA_MODE;
4733 extern int rkwifi_set_firmware(char *fw, char *nvram);
/* Resolve firmware / nvram / config file paths into dhdinfo->{fw,nv,conf}_path.
 * Priority (lowest to highest): Kconfig defaults (CONFIG_BCMDHD_*_PATH) and
 * rkwifi_set_firmware(), adapter-provided paths (first init only), then the
 * firmware_path/nvram_path/config_path module parameters, which are consumed
 * (cleared) after being copied so they only take effect when re-written.
 * Returns FALSE when a mandatory path is missing (non-BCMEMBEDIMAGE builds);
 * the TRUE return is in lines not present in this extract.
 * NOTE(review): strncpy with size == sizeof(dst) does not NUL-terminate when
 * src length >= dst size; the preceding length checks reject that case, so
 * the copies below stay terminated.
 */
4735 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
4740 const char *fw = NULL;
4741 const char *nv = NULL;
4742 const char *conf = NULL;
4743 char firmware[100] = {0};
4744 char nvram[100] = {0};
4745 char config[100] = "/system/etc/firmware/config.txt";
4746 wifi_adapter_info_t *adapter = dhdinfo->adapter;
4749 /* Update firmware and nvram path. The path may be from adapter info or module parameter
4750 * The path from adapter info is used for initialization only (as it won't change).
4752 * The firmware_path/nvram_path module parameter may be changed by the system at run
4753 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
4754 * command may change dhdinfo->fw_path. As such we need to clear the path info in
4755 * module parameter after it is copied. We won't update the path until the module parameter
4756 * is changed again (first character is not '\0')
4759 /* set default firmware and nvram path for built-in type driver */
4760 // if (!dhd_download_fw_on_driverload) {
/* Rockchip platform hook fills default firmware/nvram names */
4761 rkwifi_set_firmware(firmware, nvram);
4762 #ifdef CONFIG_BCMDHD_FW_PATH
4763 fw = CONFIG_BCMDHD_FW_PATH;
4766 #endif /* CONFIG_BCMDHD_FW_PATH */
4767 #ifdef CONFIG_BCMDHD_NVRAM_PATH
4768 nv = CONFIG_BCMDHD_NVRAM_PATH;
4771 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
4772 #ifdef CONFIG_BCMDHD_CONFIG_PATH
4773 conf = CONFIG_BCMDHD_CONFIG_PATH;
4776 #endif /* CONFIG_BCMDHD_CONFIG_PATH */
4779 /* check if we need to initialize the path */
/* Adapter paths apply only while dhdinfo paths are still unset */
4780 if (dhdinfo->fw_path[0] == '\0') {
4781 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
4782 fw = adapter->fw_path;
4785 if (dhdinfo->nv_path[0] == '\0') {
4786 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
4787 nv = adapter->nv_path;
4789 if (dhdinfo->conf_path[0] == '\0') {
4790 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
4791 conf = adapter->conf_path;
4794 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
4796 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
4798 if (firmware_path[0] != '\0')
4800 if (nvram_path[0] != '\0')
4802 if (config_path[0] != '\0')
/* Copy chosen paths in, rejecting over-long ones; strip a trailing '\n'
 * that sysfs/module-param writes commonly append */
4805 if (fw && fw[0] != '\0') {
4806 fw_len = strlen(fw);
4807 if (fw_len >= sizeof(dhdinfo->fw_path)) {
4808 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
4811 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
4812 if (dhdinfo->fw_path[fw_len-1] == '\n')
4813 dhdinfo->fw_path[fw_len-1] = '\0';
4815 if (nv && nv[0] != '\0') {
4816 nv_len = strlen(nv);
4817 if (nv_len >= sizeof(dhdinfo->nv_path)) {
4818 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
4821 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
4822 if (dhdinfo->nv_path[nv_len-1] == '\n')
4823 dhdinfo->nv_path[nv_len-1] = '\0';
4825 if (conf && conf[0] != '\0') {
4826 conf_len = strlen(conf);
4827 if (conf_len >= sizeof(dhdinfo->conf_path)) {
4828 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
4831 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
4832 if (dhdinfo->conf_path[conf_len-1] == '\n')
4833 dhdinfo->conf_path[conf_len-1] = '\0';
4837 /* clear the path in module parameter */
/* Consume the module parameters so a later call only re-reads them
 * after userspace writes them again */
4838 firmware_path[0] = '\0';
4839 nvram_path[0] = '\0';
4840 config_path[0] = '\0';
4843 #ifndef BCMEMBEDIMAGE
4844 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
4845 if (dhdinfo->fw_path[0] == '\0') {
4846 DHD_ERROR(("firmware path not found\n"));
4849 if (dhdinfo->nv_path[0] == '\0') {
4850 DHD_ERROR(("nvram path not found\n"));
4853 if (dhdinfo->conf_path[0] == '\0') {
4854 DHD_ERROR(("config path not found\n"));
4857 #endif /* BCMEMBEDIMAGE */
4863 #ifdef EXYNOS5433_PCIE_WAR
4864 extern int enum_wifi;
4865 #endif /* EXYNOS5433_PCIE_WAR */
/* Bring the dongle bus fully up: download firmware/nvram/config, start the
 * watchdog timer, initialize the bus and protocol layers, register OOB
 * interrupts (SDIO OOB builds), init PCIe flow rings, and finally sync state
 * with the dongle. Returns 0 on success, negative on failure.
 * Locking: runs under DHD_PERIM_LOCK, taken here and dropped on every exit
 * path; dhd_os_sdlock protects the bus-init phase.
 * NOTE(review): several lines (returns, else branches, EXYNOS5433 WAR
 * statements) are missing from this extract.
 */
4867 dhd_bus_start(dhd_pub_t *dhdp)
4870 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
4871 unsigned long flags;
4875 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
4877 DHD_PERIM_LOCK(dhdp);
4879 /* try to download image and nvram to the dongle */
4880 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
4881 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
4882 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
4883 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
4884 dhd->fw_path, dhd->nv_path, dhd->conf_path);
4886 #ifdef EXYNOS5433_PCIE_WAR
4888 #endif /* EXYNOS5433_PCIE_WAR */
4889 DHD_ERROR(("%s: failed to download firmware %s\n",
4890 __FUNCTION__, dhd->fw_path));
4891 DHD_PERIM_UNLOCK(dhdp);
4894 #ifdef EXYNOS5433_PCIE_WAR
4896 #endif /* EXYNOS5433_PCIE_WAR */
/* Download must have moved the bus to LOAD state to continue */
4898 if (dhd->pub.busstate != DHD_BUS_LOAD) {
4899 DHD_PERIM_UNLOCK(dhdp);
4903 dhd_os_sdlock(dhdp);
4905 /* Start the watchdog timer */
4906 dhd->pub.tickcnt = 0;
4907 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
4909 /* Bring up the bus */
4910 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
4912 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
4913 dhd_os_sdunlock(dhdp);
4914 DHD_PERIM_UNLOCK(dhdp);
4917 #if defined(OOB_INTR_ONLY)
4918 /* Host registration for OOB interrupt */
4919 if (dhd_bus_oob_intr_register(dhdp)) {
4920 /* deactivate timer and wait for the handler to finish */
4922 DHD_GENERAL_LOCK(&dhd->pub, flags);
4923 dhd->wd_timer_valid = FALSE;
4924 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4925 del_timer_sync(&dhd->timer);
4927 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
4928 dhd_os_sdunlock(dhdp);
4929 DHD_PERIM_UNLOCK(dhdp);
4930 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4934 /* Enable oob at firmware */
4935 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
4937 #ifdef PCIE_FULL_DONGLE
4940 uint32 num_flowrings; /* includes H2D common rings */
4941 num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
4942 DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
4944 if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
4945 dhd_os_sdunlock(dhdp);
4946 DHD_PERIM_UNLOCK(dhdp);
4950 #endif /* PCIE_FULL_DONGLE */
4952 /* Do protocol initialization necessary for IOCTL/IOVAR */
4953 dhd_prot_init(&dhd->pub);
4955 /* If bus is not ready, can't come up */
4956 if (dhd->pub.busstate != DHD_BUS_DATA) {
/* Stop the watchdog before bailing: invalidate under lock, then wait
 * for a possibly-running timer handler to finish */
4957 DHD_GENERAL_LOCK(&dhd->pub, flags);
4958 dhd->wd_timer_valid = FALSE;
4959 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4960 del_timer_sync(&dhd->timer);
4961 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
4962 dhd_os_sdunlock(dhdp);
4963 DHD_PERIM_UNLOCK(dhdp);
4964 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4968 dhd_os_sdunlock(dhdp);
4970 /* Bus is ready, query any dongle information */
4971 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
4972 DHD_PERIM_UNLOCK(dhdp);
4976 #ifdef ARP_OFFLOAD_SUPPORT
/* Flush an IP address recorded while the bus was down into the
 * dongle's ARP-offload host-IP table */
4977 if (dhd->pend_ipaddr) {
4978 #ifdef AOE_IP_ALIAS_SUPPORT
4979 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
4980 #endif /* AOE_IP_ALIAS_SUPPORT */
4981 dhd->pend_ipaddr = 0;
4983 #endif /* ARP_OFFLOAD_SUPPORT */
4985 DHD_PERIM_UNLOCK(dhdp);
/* Enable/disable TDLS in the dongle and, when auto_on is requested, program
 * the TDLS auto-operation parameters (idle time, RSSI high/low thresholds).
 * Early-outs: firmware without TDLS support, or state already == tdls_on.
 * Returns an ioctl/BCME status code (success return is in missing lines).
 * NOTE(review): the `mac` parameter is not used in any visible line.
 */
4990 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
4992 char iovbuf[WLC_IOCTL_SMLEN];
4993 uint32 tdls = tdls_on;
4995 uint32 tdls_auto_op = 0;
4996 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
4997 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
4998 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
5000 if (!FW_SUPPORTED(dhd, tdls))
5003 if (dhd->tdls_enable == tdls_on)
/* Toggle TDLS itself */
5005 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
5006 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5007 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
/* Cache the new state so repeated calls short-circuit above */
5010 dhd->tdls_enable = tdls_on;
5013 tdls_auto_op = auto_on;
5014 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
5015 iovbuf, sizeof(iovbuf));
5016 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5017 sizeof(iovbuf), TRUE, 0)) < 0) {
5018 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
/* Auto-mode tuning: idle timeout and RSSI hysteresis thresholds */
5023 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
5024 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
5025 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5026 sizeof(iovbuf), TRUE, 0)) < 0) {
5027 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
5030 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
5031 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5032 sizeof(iovbuf), TRUE, 0)) < 0) {
5033 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
5036 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
5037 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5038 sizeof(iovbuf), TRUE, 0)) < 0) {
5039 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
/* Public net_device-facing wrapper: resolve dhd_info from the netdev and
 * delegate to _dhd_tdls_enable. Returns its status code (return statement
 * is in lines missing from this extract).
 */
5048 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
5050 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5053 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
5058 #ifdef PCIE_FULL_DONGLE
5059 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
5061 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5062 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5063 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
5064 tdls_peer_node_t *new = NULL, *prev = NULL;
5066 uint8 sa[ETHER_ADDR_LEN];
5067 int ifidx = dhd_net2idx(dhd, dev);
5069 if (ifidx == DHD_BAD_IF)
5072 dhdif = dhd->iflist[ifidx];
5073 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
5076 while (cur != NULL) {
5077 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5078 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5079 __FUNCTION__, __LINE__));
5085 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
5087 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
5090 memcpy(new->addr, da, ETHER_ADDR_LEN);
5091 new->next = dhdp->peer_tbl.node;
5092 dhdp->peer_tbl.node = new;
5093 dhdp->peer_tbl.tdls_peer_count++;
5096 while (cur != NULL) {
5097 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5098 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
5100 prev->next = cur->next;
5102 dhdp->peer_tbl.node = cur->next;
5103 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
5104 dhdp->peer_tbl.tdls_peer_count--;
5110 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
5113 #endif /* PCIE_FULL_DONGLE */
/* TRUE when op_mode indicates any concurrent operation: either multi-channel
 * concurrency, or single-channel concurrency with all its flag bits set.
 * (Returns and a guard clause are in lines missing from this extract.)
 */
5116 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
5121 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
5123 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
5124 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
5129 #if !defined(AP) && defined(WLP2P)
5130 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
5131 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
5132 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
5133 * would still be named as fw_bcmdhd_apsta.
/* Probe firmware capability flags to derive the concurrency mode bits:
 * returns 0 for HOSTAP/MFG modes or when P2P is unsupported, otherwise
 * DHD_FLAG_CONCURR_SINGLE_CHAN_MODE, plus MULTI_CHAN when VSDB is supported.
 * (Function tail, including P2P-interface flag additions, is in lines
 * missing from this extract.)
 */
5136 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
5139 char buf[WLC_IOCTL_SMLEN];
5140 bool mchan_supported = FALSE;
5141 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5142 * test mode, that means we only will use the mode as it is
5144 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
/* VSDB capability implies multi-channel concurrency is possible */
5146 if (FW_SUPPORTED(dhd, vsdb)) {
5147 mchan_supported = TRUE;
5149 if (!FW_SUPPORTED(dhd, p2p)) {
5150 DHD_TRACE(("Chip does not support p2p\n"));
5154 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5155 memset(buf, 0, sizeof(buf));
5156 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
5157 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5159 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
5164 /* By default, chip supports single chan concurrency,
5165 * now lets check for mchan
5167 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
5168 if (mchan_supported)
5169 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
5170 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5171 /* For customer_hw4, although ICS,
5172 * we still support concurrent mode
5184 #if defined(READ_CONFIG_FROM_FILE)
5185 #include <linux/fs.h>
5186 #include <linux/ctype.h>
5188 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
5189 bool PM_control = TRUE;
/*
 * Apply one "name=value" setting parsed from the on-disk config file
 * (see dhd_preinit_config below). A handful of names ("country", the roam
 * parameters, "PM", "band", "cur_etheraddr", ...) get dedicated handling;
 * any unrecognized name falls through to a generic integer iovar set.
 * Returns the ioctl status for the setting that was applied.
 * NOTE(review): intermediate source lines (braces, some declarations such as
 * 'revstr', 'var_int', 'ret', 'x', 'roam_trigger', 'buf') are missing from
 * this extract.
 */
5191 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
5194 wl_country_t cspec = {{0}, -1, {0}};
5196 char *endptr = NULL;
5198 char smbuf[WLC_IOCTL_SMLEN*2];
/* "country" accepts either "XX" or "XX/rev"; the optional '/rev' suffix is
 * parsed into cspec.rev. */
5200 if (!strcmp(name, "country")) {
5201 revstr = strchr(value, '/');
5203 cspec.rev = strtoul(revstr + 1, &endptr, 10);
5204 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
/* Force a two-letter abbreviation regardless of what was copied in. */
5205 cspec.country_abbrev[2] = '\0';
5206 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
/* No '/rev' given: let the platform adapter translate the abbreviation into
 * a full customized country spec. */
5209 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5210 memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
5211 get_customized_country_code(dhd->info->adapter,
5212 (char *)&cspec.country_abbrev, &cspec);
5214 memset(smbuf, 0, sizeof(smbuf));
5215 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
5216 cspec.country_abbrev, cspec.rev));
5217 iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
5218 smbuf, sizeof(smbuf));
5219 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5220 smbuf, iolen, TRUE, 0);
5221 } else if (!strcmp(name, "roam_scan_period")) {
5222 var_int = (int)simple_strtol(value, NULL, 0);
5223 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
5224 &var_int, sizeof(var_int), TRUE, 0);
5225 } else if (!strcmp(name, "roam_delta")) {
5230 x.val = (int)simple_strtol(value, NULL, 0);
5231 /* x.band = WLC_BAND_AUTO; */
5232 x.band = WLC_BAND_ALL;
5233 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
5234 } else if (!strcmp(name, "roam_trigger")) {
5237 roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
5238 roam_trigger[1] = WLC_BAND_ALL;
5239 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
5240 sizeof(roam_trigger), TRUE, 0);
5243 } else if (!strcmp(name, "PM")) {
5245 var_int = (int)simple_strtol(value, NULL, 0);
5247 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
5248 &var_int, sizeof(var_int), TRUE, 0);
5250 #if defined(CONFIG_PM_LOCK)
/* When the config file pins a PM mode, remember it so runtime code stops
 * overriding power-save policy (g_pm_control gating). */
5252 g_pm_control = TRUE;
5253 printk("%s var_int=%d don't control PM\n", __func__, var_int);
5255 g_pm_control = FALSE;
5256 printk("%s var_int=%d do control PM\n", __func__, var_int);
5263 else if (!strcmp(name, "btamp_chan")) {
5269 btamp_chan = (int)simple_strtol(value, NULL, 0);
5270 iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
/* NOTE(review): the closing parenthesis here looks misplaced — as written,
 * 'ret' is assigned the result of the '< 0' comparison (0 or 1), not the
 * ioctl status, so the "failed code %d" message prints the wrong value.
 * Verify against the upstream tree before relying on it. */
5271 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
5272 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
5273 __FUNCTION__, btamp_chan, ret));
5275 DHD_ERROR(("%s btamp_chan %d set success\n",
5276 __FUNCTION__, btamp_chan));
5278 #endif /* WLBTAMP */
/* "band" maps the textual value onto a WLC band constant; unknown strings
 * fall back to auto. */
5279 else if (!strcmp(name, "band")) {
5281 if (!strcmp(value, "auto"))
5282 var_int = WLC_BAND_AUTO;
5283 else if (!strcmp(value, "a"))
5284 var_int = WLC_BAND_5G;
5285 else if (!strcmp(value, "b"))
5286 var_int = WLC_BAND_2G;
5287 else if (!strcmp(value, "all"))
5288 var_int = WLC_BAND_ALL;
5290 printk(" set band value should be one of the a or b or all\n");
5291 var_int = WLC_BAND_AUTO;
5293 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
5294 sizeof(var_int), TRUE, 0)) < 0)
5295 printk(" set band err=%d\n", ret);
5297 } else if (!strcmp(name, "cur_etheraddr")) {
5298 struct ether_addr ea;
5303 bcm_ether_atoe(value, &ea);
/* Skip the firmware round-trip if the configured MAC equals the current one. */
5305 ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
5307 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
5311 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
5312 ea.octet[0], ea.octet[1], ea.octet[2],
5313 ea.octet[3], ea.octet[4], ea.octet[5]));
5315 iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
5317 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
5319 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
/* Keep the host-side cached MAC in sync with what was pushed to firmware. */
5323 memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
/* Fallback path: treat any other name as a plain integer iovar. */
5328 char iovbuf[WLC_IOCTL_SMLEN];
5330 /* wlu_iovar_setint */
5331 var_int = (int)simple_strtol(value, NULL, 0);
5333 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
5334 if (!strcmp(name, "roam_off")) {
5335 /* Setup timeout if Beacons are lost to report link down */
5337 uint bcn_timeout = 2;
5338 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
5339 iovbuf, sizeof(iovbuf));
5340 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5343 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
5345 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
5347 iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
5348 iovbuf, sizeof(iovbuf));
5349 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5350 iovbuf, iovlen, TRUE, 0);
/*
 * Read the driver config file from CONFIG_BCMDHD_CONFIG_PATH, split it into
 * whitespace-delimited "name=value"-style tokens, and apply each entry via
 * dhd_preinit_proc(). The file is loaded whole into a MALLOC'd buffer sized
 * from vfs_stat(), NUL-terminated, and freed on exit.
 * NOTE(review): intermediate source lines (error-path gotos, brace closures,
 * the token-splitting NUL writes, declarations of 'config_path'/'ret'/'len'/
 * 'stat') are missing from this extract; the cleanup ordering must be
 * confirmed against the full file. The mm_segment_t/set_fs pattern implied
 * by 'old_fs' is a legacy kernel-VFS idiom removed in modern kernels.
 */
5356 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
5358 mm_segment_t old_fs;
5360 struct file *fp = NULL;
5362 char *buf = NULL, *p, *name, *value;
5366 config_path = CONFIG_BCMDHD_CONFIG_PATH;
5370 printk(KERN_ERR "config_path can't read. \n");
/* Stat first so the read buffer can be sized to the exact file length. */
5376 if ((ret = vfs_stat(config_path, &stat))) {
5378 printk(KERN_ERR "%s: Failed to get information (%d)\n",
/* +1 leaves room for the terminating NUL written after the read. */
5384 if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
5385 printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
5389 printk("dhd_preinit_config : config path : %s \n", config_path);
5391 if (!(fp = dhd_os_open_image(config_path)) ||
5392 (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
5395 buf[stat.size] = '\0';
/* Scan the buffer: each iteration isolates one name token and one value
 * token, both terminated at the next whitespace. */
5396 for (p = buf; *p; p++) {
5399 for (name = p++; *p && !isspace(*p); p++) {
5403 for (value = p; *p && !isspace(*p); p++);
/* A failed setting is logged but does not abort the remaining entries
 * (behavior presumed from the visible lines — confirm in the full file). */
5405 if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
5406 printk(KERN_ERR "%s: %s=%s\n",
5407 bcmerrorstr(ret), name, value);
5417 dhd_os_close_image(fp);
/* Free exactly the size that was allocated (stat.size + 1). */
5419 MFREE(dhd->osh, buf, stat.size+1);
5426 #endif /* READ_CONFIG_FROM_FILE */
5429 dhd_preinit_ioctls(dhd_pub_t *dhd)
5432 char eventmask[WL_EVENTING_MASK_LEN];
5433 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
5434 uint32 buf_key_b4_m4 = 1;
5436 eventmsgs_ext_t *eventmask_msg;
5437 char iov_buf[WLC_IOCTL_SMLEN];
5440 aibss_bcn_force_config_t bcn_config;
5444 #endif /* WLAIBSS_PS */
5445 #endif /* WLAIBSS */
5446 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5449 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5450 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5451 uint32 ampdu_ba_wsize = 0;
5452 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5453 #if defined(CUSTOM_AMPDU_MPDU)
5454 int32 ampdu_mpdu = 0;
5456 #if defined(CUSTOM_AMPDU_RELEASE)
5457 int32 ampdu_release = 0;
5460 #if defined(BCMSDIO)
5461 #ifdef PROP_TXSTATUS
5462 int wlfc_enable = TRUE;
5464 uint32 hostreorder = 1;
5466 #endif /* DISABLE_11N */
5467 #endif /* PROP_TXSTATUS */
5469 #ifdef PCIE_FULL_DONGLE
5470 uint32 wl_ap_isolate;
5471 #endif /* PCIE_FULL_DONGLE */
5473 #ifdef DHD_ENABLE_LPC
5475 #endif /* DHD_ENABLE_LPC */
5476 uint power_mode = PM_FAST;
5477 uint32 dongle_align = DHD_SDALIGN;
5478 #if defined(BCMSDIO)
5479 uint32 glom = CUSTOM_GLOM_SETTING;
5480 #endif /* defined(BCMSDIO) */
5481 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
5484 uint bcn_timeout = dhd->conf->bcn_timeout;
5486 #if defined(ARP_OFFLOAD_SUPPORT)
5489 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
5490 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
5491 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
5492 char buf[WLC_IOCTL_SMLEN];
5494 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
5497 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
5498 int roam_scan_period[2] = {10, WLC_BAND_ALL};
5499 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
5500 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
5501 int roam_fullscan_period = 60;
5502 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
5503 int roam_fullscan_period = 120;
5504 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
5506 #ifdef DISABLE_BUILTIN_ROAM
5508 #endif /* DISABLE_BUILTIN_ROAM */
5509 #endif /* ROAM_ENABLE */
5514 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
5515 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
5516 struct ether_addr p2p_ea;
5522 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
5523 uint32 apsta = 1; /* Enable APSTA mode */
5524 #elif defined(SOFTAP_AND_GC)
5527 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
5528 #ifdef GET_CUSTOM_MAC_ENABLE
5529 struct ether_addr ea_addr;
5530 #endif /* GET_CUSTOM_MAC_ENABLE */
5534 #endif /* DISABLE_11N */
5536 #if defined(DISABLE_11AC)
5538 #endif /* DISABLE_11AC */
5541 #endif /* USE_WL_TXBF */
5542 #ifdef AMPDU_VO_ENABLE
5543 struct ampdu_tid_control tid;
5545 #ifdef USE_WL_FRAMEBURST
5546 uint32 frameburst = 1;
5547 #endif /* USE_WL_FRAMEBURST */
5548 #ifdef DHD_SET_FW_HIGHSPEED
5549 uint32 ack_ratio = 250;
5550 uint32 ack_ratio_depth = 64;
5551 #endif /* DHD_SET_FW_HIGHSPEED */
5552 #ifdef SUPPORT_2G_VHT
5553 uint32 vht_features = 0x3; /* 2G enable | rates all */
5554 #endif /* SUPPORT_2G_VHT */
5555 #ifdef CUSTOM_PSPRETEND_THR
5556 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
5558 #ifdef PKT_FILTER_SUPPORT
5559 dhd_pkt_filter_enable = TRUE;
5560 #endif /* PKT_FILTER_SUPPORT */
5562 dhd->tdls_enable = FALSE;
5564 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
5565 DHD_TRACE(("Enter %s\n", __FUNCTION__));
5567 dhd_conf_set_band(dhd);
5570 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
5571 (op_mode == DHD_FLAG_MFG_MODE)) {
5572 /* Check and adjust IOCTL response timeout for Manufactring firmware */
5573 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
5574 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
5578 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
5579 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
5581 #ifdef GET_CUSTOM_MAC_ENABLE
5582 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
5584 memset(buf, 0, sizeof(buf));
5585 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
5586 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
5588 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
5591 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
5593 #endif /* GET_CUSTOM_MAC_ENABLE */
5594 /* Get the default device MAC address directly from firmware */
5595 memset(buf, 0, sizeof(buf));
5596 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
5597 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5599 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
5602 /* Update public MAC address after reading from Firmware */
5603 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
5605 #ifdef GET_CUSTOM_MAC_ENABLE
5607 #endif /* GET_CUSTOM_MAC_ENABLE */
5609 /* get a capabilities from firmware */
5610 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
5611 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
5612 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
5613 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
5614 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
5615 __FUNCTION__, ret));
5618 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
5619 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
5620 #ifdef SET_RANDOM_MAC_SOFTAP
5623 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
5624 #if defined(ARP_OFFLOAD_SUPPORT)
5627 #ifdef PKT_FILTER_SUPPORT
5628 dhd_pkt_filter_enable = FALSE;
5630 #ifdef SET_RANDOM_MAC_SOFTAP
5631 SRANDOM32((uint)jiffies);
5632 rand_mac = RANDOM32();
5633 iovbuf[0] = 0x02; /* locally administered bit */
5636 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
5637 iovbuf[4] = (unsigned char)(rand_mac >> 8);
5638 iovbuf[5] = (unsigned char)(rand_mac >> 16);
5640 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
5641 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
5643 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
5645 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
5646 #endif /* SET_RANDOM_MAC_SOFTAP */
5647 #if !defined(AP) && defined(WL_CFG80211)
5648 /* Turn off MPC in AP mode */
5649 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
5650 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5651 sizeof(iovbuf), TRUE, 0)) < 0) {
5652 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
5655 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
5656 (op_mode == DHD_FLAG_MFG_MODE)) {
5657 #if defined(ARP_OFFLOAD_SUPPORT)
5659 #endif /* ARP_OFFLOAD_SUPPORT */
5660 #ifdef PKT_FILTER_SUPPORT
5661 dhd_pkt_filter_enable = FALSE;
5662 #endif /* PKT_FILTER_SUPPORT */
5663 dhd->op_mode = DHD_FLAG_MFG_MODE;
5665 uint32 concurrent_mode = 0;
5666 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
5667 (op_mode == DHD_FLAG_P2P_MODE)) {
5668 #if defined(ARP_OFFLOAD_SUPPORT)
5671 #ifdef PKT_FILTER_SUPPORT
5672 dhd_pkt_filter_enable = FALSE;
5674 dhd->op_mode = DHD_FLAG_P2P_MODE;
5675 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
5676 (op_mode == DHD_FLAG_IBSS_MODE)) {
5677 dhd->op_mode = DHD_FLAG_IBSS_MODE;
5679 dhd->op_mode = DHD_FLAG_STA_MODE;
5680 #if !defined(AP) && defined(WLP2P)
5681 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
5682 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
5683 #if defined(ARP_OFFLOAD_SUPPORT)
5686 dhd->op_mode |= concurrent_mode;
5689 /* Check if we are enabling p2p */
5690 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
5691 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
5692 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5693 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5694 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
5697 #if defined(SOFTAP_AND_GC)
5698 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
5699 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
5700 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
5703 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
5704 ETHER_SET_LOCALADDR(&p2p_ea);
5705 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
5706 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
5707 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5708 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5709 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
5711 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
5715 (void)concurrent_mode;
5719 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
5720 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
5721 /* Set Country code */
5722 if (dhd->dhd_cspec.ccode[0] != 0) {
5723 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
5724 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
5725 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
5726 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
5727 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
5729 dhd_conf_set_country(dhd);
5730 dhd_conf_fix_country(dhd);
5732 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
5734 #if defined(DISABLE_11AC)
5735 bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
5736 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
5737 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
5738 #endif /* DISABLE_11AC */
5740 /* Set Listen Interval */
5741 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
5742 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
5743 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
5745 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
5746 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
5747 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
5748 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5749 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
5750 #if defined(ROAM_ENABLE)
5751 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
5752 sizeof(roam_trigger), TRUE, 0)) < 0)
5753 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
5754 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
5755 sizeof(roam_scan_period), TRUE, 0)) < 0)
5756 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
5757 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
5758 sizeof(roam_delta), TRUE, 0)) < 0)
5759 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
5760 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
5761 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
5762 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
5763 #endif /* ROAM_ENABLE */
5764 dhd_conf_set_roam(dhd);
5767 bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
5768 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5771 /* by default TDLS on and auto mode off */
5772 _dhd_tdls_enable(dhd, true, false, NULL);
5775 #ifdef DHD_ENABLE_LPC
5777 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
5778 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5779 sizeof(iovbuf), TRUE, 0)) < 0) {
5780 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
5782 #endif /* DHD_ENABLE_LPC */
5784 /* Set PowerSave mode */
5785 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
5787 /* Match Host and Dongle rx alignment */
5788 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
5789 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5791 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
5792 /* enable credall to reduce the chance of no bus credit happened. */
5793 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
5794 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5797 #if defined(BCMSDIO)
5798 if (glom != DEFAULT_GLOM_VALUE) {
5799 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
5800 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
5801 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5803 #endif /* defined(BCMSDIO) */
5804 dhd_conf_set_glom(dhd);
5806 /* Setup timeout if Beacons are lost and roam is off to report link down */
5807 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
5808 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5809 /* Setup assoc_retry_max count to reconnect target AP in dongle */
5810 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
5811 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5812 #if defined(AP) && !defined(WLP2P)
5813 /* Turn off MPC in AP mode */
5814 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
5815 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5816 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
5817 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5818 #endif /* defined(AP) && !defined(WLP2P) */
5819 dhd_conf_set_mimo_bw_cap(dhd);
5820 dhd_conf_force_wme(dhd);
5821 dhd_conf_set_stbc(dhd);
5822 dhd_conf_set_srl(dhd);
5823 dhd_conf_set_lrl(dhd);
5824 dhd_conf_set_spect(dhd);
5827 if (ap_fw_loaded == TRUE) {
5828 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
5832 #if defined(KEEP_ALIVE)
5834 /* Set Keep Alive : be sure to use FW with -keepalive */
5838 if (ap_fw_loaded == FALSE)
5840 if (!(dhd->op_mode &
5841 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
5842 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
5843 DHD_ERROR(("%s set keeplive failed %d\n",
5844 __FUNCTION__, res));
5847 #endif /* defined(KEEP_ALIVE) */
5850 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
5851 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5852 sizeof(iovbuf), TRUE, 0)) < 0) {
5853 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
5855 #endif /* USE_WL_TXBF */
5856 #ifdef USE_WL_FRAMEBURST
5857 /* Set frameburst to value */
5858 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
5859 sizeof(frameburst), TRUE, 0)) < 0) {
5860 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
5862 #endif /* USE_WL_FRAMEBURST */
5863 #ifdef DHD_SET_FW_HIGHSPEED
5865 bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
5866 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5867 sizeof(iovbuf), TRUE, 0)) < 0) {
5868 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
5871 /* Set ack_ratio_depth */
5872 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
5873 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5874 sizeof(iovbuf), TRUE, 0)) < 0) {
5875 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
5877 #endif /* DHD_SET_FW_HIGHSPEED */
5878 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5879 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5880 /* Set ampdu ba wsize to 64 or 16 */
5881 #ifdef CUSTOM_AMPDU_BA_WSIZE
5882 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
5884 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
5885 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
5886 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
5887 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
5888 if (ampdu_ba_wsize != 0) {
5889 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
5890 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5891 sizeof(iovbuf), TRUE, 0)) < 0) {
5892 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
5893 __FUNCTION__, ampdu_ba_wsize, ret));
5896 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5899 /* Configure custom IBSS beacon transmission */
5900 if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
5903 bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
5904 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5905 sizeof(iovbuf), TRUE, 0)) < 0) {
5906 DHD_ERROR(("%s Set aibss to %d failed %d\n",
5907 __FUNCTION__, aibss, ret));
5911 bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
5912 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5913 sizeof(iovbuf), TRUE, 0)) < 0) {
5914 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
5915 __FUNCTION__, aibss, ret));
5917 #endif /* WLAIBSS_PS */
5919 memset(&bcn_config, 0, sizeof(bcn_config));
5920 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
5921 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
5922 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
5923 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
5924 bcn_config.len = sizeof(bcn_config);
5926 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
5927 sizeof(aibss_bcn_force_config_t), iov_buf, sizeof(iov_buf));
5928 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
5929 sizeof(iov_buf), TRUE, 0)) < 0) {
5930 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
5931 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
5932 AIBSS_BCN_FLOOD_DUR, ret));
5934 #endif /* WLAIBSS */
5936 #if defined(CUSTOM_AMPDU_MPDU)
5937 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
5938 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
5939 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
5940 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5941 sizeof(iovbuf), TRUE, 0)) < 0) {
5942 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
5943 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
5946 #endif /* CUSTOM_AMPDU_MPDU */
5947 dhd_conf_set_ampdu_ba_wsize(dhd);
5949 #if defined(CUSTOM_AMPDU_RELEASE)
5950 ampdu_release = CUSTOM_AMPDU_RELEASE;
5951 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
5952 bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
5953 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5954 sizeof(iovbuf), TRUE, 0)) < 0) {
5955 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
5956 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
5959 #endif /* CUSTOM_AMPDU_RELEASE */
5961 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5962 /* Read 4-way handshake requirements */
5963 if (dhd_use_idsup == 1) {
5964 bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
5965 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5966 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
5967 * in-dongle supplicant.
5969 if (ret >= 0 || ret == BCME_NOTREADY)
5970 dhd->fw_4way_handshake = TRUE;
5971 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
5973 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
5974 #ifdef SUPPORT_2G_VHT
5975 bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
5976 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5977 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
5979 #endif /* SUPPORT_2G_VHT */
5980 #ifdef CUSTOM_PSPRETEND_THR
5981 /* Turn off MPC in AP mode */
5982 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
5983 iovbuf, sizeof(iovbuf));
5984 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5985 sizeof(iovbuf), TRUE, 0)) < 0) {
5986 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
5987 __FUNCTION__, ret));
5991 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
5992 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5993 sizeof(iovbuf), TRUE, 0)) < 0) {
5994 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
5997 /* Read event_msgs mask */
5998 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
5999 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
6000 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
6003 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
6005 /* Setup event_msgs */
6006 setbit(eventmask, WLC_E_SET_SSID);
6007 setbit(eventmask, WLC_E_PRUNE);
6008 setbit(eventmask, WLC_E_AUTH);
6009 setbit(eventmask, WLC_E_AUTH_IND);
6010 setbit(eventmask, WLC_E_ASSOC);
6011 setbit(eventmask, WLC_E_REASSOC);
6012 setbit(eventmask, WLC_E_REASSOC_IND);
6013 setbit(eventmask, WLC_E_DEAUTH);
6014 setbit(eventmask, WLC_E_DEAUTH_IND);
6015 setbit(eventmask, WLC_E_DISASSOC_IND);
6016 setbit(eventmask, WLC_E_DISASSOC);
6017 setbit(eventmask, WLC_E_JOIN);
6018 setbit(eventmask, WLC_E_START);
6019 setbit(eventmask, WLC_E_ASSOC_IND);
6020 setbit(eventmask, WLC_E_PSK_SUP);
6021 setbit(eventmask, WLC_E_LINK);
6022 setbit(eventmask, WLC_E_NDIS_LINK);
6023 setbit(eventmask, WLC_E_MIC_ERROR);
6024 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
6025 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
6027 setbit(eventmask, WLC_E_PMKID_CACHE);
6028 setbit(eventmask, WLC_E_TXFAIL);
6030 setbit(eventmask, WLC_E_JOIN_START);
6031 setbit(eventmask, WLC_E_SCAN_COMPLETE);
6033 setbit(eventmask, WLC_E_HTSFSYNC);
6034 #endif /* WLMEDIA_HTSF */
6036 setbit(eventmask, WLC_E_PFN_NET_FOUND);
6037 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
6038 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
6039 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
6040 #endif /* PNO_SUPPORT */
6041 /* enable dongle roaming event */
6042 setbit(eventmask, WLC_E_ROAM);
6043 setbit(eventmask, WLC_E_BSSID);
6045 setbit(eventmask, WLC_E_ADDTS_IND);
6046 setbit(eventmask, WLC_E_DELTS_IND);
6049 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
6052 setbit(eventmask, WLC_E_ESCAN_RESULT);
6053 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6054 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
6055 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
6057 #endif /* WL_CFG80211 */
6059 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
6060 #endif /* WLAIBSS */
6061 setbit(eventmask, WLC_E_TRACE);
6063 /* Write updated Event mask */
6064 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6065 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6066 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
6070 /* make up event mask ext message iovar for event larger than 128 */
6071 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
6072 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
6073 if (eventmask_msg == NULL) {
6074 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
6077 bzero(eventmask_msg, msglen);
6078 eventmask_msg->ver = EVENTMSGS_VER;
6079 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6081 /* Read event_msgs_ext mask */
6082 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, sizeof(iov_buf));
6083 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, sizeof(iov_buf), FALSE, 0);
6084 if (ret2 != BCME_UNSUPPORTED)
6086 if (ret2 == 0) { /* event_msgs_ext must be supported */
6087 bcopy(iov_buf, eventmask_msg, msglen);
6089 #ifdef BT_WIFI_HANDOVER
6090 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
6091 #endif /* BT_WIFI_HANDOVER */
6093 /* Write updated Event mask */
6094 eventmask_msg->ver = EVENTMSGS_VER;
6095 eventmask_msg->command = EVENTMSGS_SET_MASK;
6096 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6097 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
6098 msglen, iov_buf, sizeof(iov_buf));
6099 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6100 iov_buf, sizeof(iov_buf), TRUE, 0)) < 0) {
6101 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
6102 kfree(eventmask_msg);
6105 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
6106 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
6107 kfree(eventmask_msg);
6109 } /* unsupported is ok */
6110 kfree(eventmask_msg);
6112 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
6113 sizeof(scan_assoc_time), TRUE, 0);
6114 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
6115 sizeof(scan_unassoc_time), TRUE, 0);
6116 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
6117 sizeof(scan_passive_time), TRUE, 0);
6119 #ifdef ARP_OFFLOAD_SUPPORT
6120 /* Set and enable ARP offload feature for STA only */
6122 if (arpoe && !ap_fw_loaded)
6127 dhd_arp_offload_enable(dhd, TRUE);
6128 dhd_arp_offload_set(dhd, dhd_arp_mode);
6130 dhd_arp_offload_enable(dhd, FALSE);
6131 dhd_arp_offload_set(dhd, 0);
6133 dhd_arp_enable = arpoe;
6134 #endif /* ARP_OFFLOAD_SUPPORT */
6136 #ifdef PKT_FILTER_SUPPORT
6137 /* Setup default defintions for pktfilter , enable in suspend */
6138 dhd->pktfilter_count = 6;
6139 /* Setup filter to allow only unicast */
6140 if (dhd_master_mode) {
6141 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
6142 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
6143 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
6144 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
6145 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6146 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6147 /* apply APP pktfilter */
6148 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
6150 dhd_conf_discard_pkt_filter(dhd);
6151 dhd_conf_add_pkt_filter(dhd);
6155 dhd_enable_packet_filter(0, dhd);
6157 #endif /* defined(SOFTAP) */
6158 dhd_set_packet_filter(dhd);
6159 #endif /* PKT_FILTER_SUPPORT */
6161 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
6162 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6163 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
6164 #endif /* DISABLE_11N */
6166 #ifdef AMPDU_VO_ENABLE
6167 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
6169 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6170 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6172 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
6174 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6175 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6177 #if defined(SOFTAP_TPUT_ENHANCE)
6178 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6179 dhd_bus_setidletime(dhd, (int)100);
6180 #ifdef DHDTCPACK_SUPPRESS
6181 dhd->tcpack_sup_enabled = FALSE;
6183 #if defined(DHD_TCP_WINSIZE_ADJUST)
6184 dhd_use_tcp_window_size_adjust = TRUE;
6187 memset(buf, 0, sizeof(buf));
6188 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
6189 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
6191 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6192 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6197 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
6199 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6203 #endif /* SOFTAP_TPUT_ENHANCE */
6205 /* query for 'ver' to get version info from firmware */
6206 memset(buf, 0, sizeof(buf));
6208 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
6209 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
6210 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6212 bcmstrtok(&ptr, "\n", 0);
6213 /* Print fw version info */
6214 DHD_ERROR(("Firmware version = %s\n", buf));
6215 #if defined(BCMSDIO)
6216 dhd_set_version_info(dhd, buf);
6217 #endif /* defined(BCMSDIO) */
6220 #if defined(BCMSDIO)
6221 dhd_txglom_enable(dhd, TRUE);
6222 #endif /* defined(BCMSDIO) */
6224 #if defined(BCMSDIO)
6225 #ifdef PROP_TXSTATUS
6226 if (disable_proptx ||
6227 #ifdef PROP_TXSTATUS_VSDB
6228 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6229 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
6230 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
6231 #endif /* PROP_TXSTATUS_VSDB */
6233 wlfc_enable = FALSE;
6237 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
6238 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
6239 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6240 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
6241 if (ret2 != BCME_UNSUPPORTED)
6243 if (ret2 != BCME_OK)
6246 #endif /* DISABLE_11N */
6248 #ifdef READ_CONFIG_FROM_FILE
6249 dhd_preinit_config(dhd, 0);
6250 #endif /* READ_CONFIG_FROM_FILE */
6255 else if (hostreorder)
6256 dhd_wlfc_hostreorder_init(dhd);
6257 #endif /* DISABLE_11N */
6259 #endif /* PROP_TXSTATUS */
6260 #endif /* BCMSDIO || BCMBUS */
6261 #ifdef PCIE_FULL_DONGLE
6262 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6263 if (FW_SUPPORTED(dhd, ap)) {
6264 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
6265 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
6266 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6267 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6269 #endif /* PCIE_FULL_DONGLE */
6271 if (!dhd->pno_state) {
6276 dhd_interworking_enable(dhd);
/*
 * dhd_iovar() - pack an iovar (name string + payload) with bcm_mkiovar()
 * and issue it to the dongle via dhd_wl_ioctl(); on a successful GET the
 * firmware's answer is copied back into cmd_buf.
 * NOTE(review): listing truncated (line-number gaps) -- declarations of
 * ioc/ret and parts of ioc setup are not visible here.
 * NOTE(review): buf is a VLA sized strlen(name)+1+cmd_len; a large cmd_len
 * grows the kernel stack -- verify callers bound cmd_len.
 */
6285 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
6287 char buf[strlen(name) + 1 + cmd_len];
6288 int len = sizeof(buf);
6292 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
/* zero the ioctl descriptor before filling it in */
6294 memset(&ioc, 0, sizeof(ioc));
6296 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
6301 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
/* GET path: propagate the result to the caller's buffer only on success */
6302 if (!set && ret >= 0)
6303 memcpy(cmd_buf, buf, cmd_len);
/*
 * dhd_change_mtu() - validate and apply a new MTU for interface ifidx.
 * Rejects the request while the netdev is running, and enforces the
 * [DHD_MIN_MTU, DHD_MAX_MTU] = [1500, 1752] range.
 * NOTE(review): listing truncated -- the code that actually stores the new
 * MTU and the final return are not visible here.
 */
6308 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
6310 struct dhd_info *dhd = dhdp->info;
6311 struct net_device *dev = NULL;
6313 ASSERT(dhd && dhd->iflist[ifidx]);
6314 dev = dhd->iflist[ifidx]->net;
/* the interface must be down before its MTU may be changed */
6317 if (netif_running(dev)) {
6318 DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
6319 return BCME_NOTDOWN;
6322 #define DHD_MIN_MTU 1500
6323 #define DHD_MAX_MTU 1752
6325 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
6326 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
6334 #ifdef ARP_OFFLOAD_SUPPORT
6335 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * aoe_update_host_ipv4_table() - add or remove one IPv4 address (ipa) in the
 * dongle's ARP-offload host-IP table for interface idx.
 * Strategy: read the current table into a local cache, clear the table in
 * the dongle, edit the cache (add ipa to a free slot, or drop it), then
 * write the surviving entries back one by one.
 * NOTE(review): listing truncated -- declarations of i/ret, the actual
 * "ipv4_buf[i] = ipa" store and some loop braces are not visible here.
 */
6337 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
6339 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
6343 bzero(ipv4_buf, sizeof(ipv4_buf));
6345 /* display what we've got */
6346 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6347 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
6349 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6351 /* now we saved hoste_ip table, clr it in the dongle AOE */
6352 dhd_aoe_hostip_clr(dhd_pub, idx);
6355 DHD_ERROR(("%s failed\n", __FUNCTION__));
/* walk the cached table: insert ipa into the first empty slot (add) or
 * drop a matching entry (remove), then re-program surviving entries */
6359 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
6360 if (add && (ipv4_buf[i] == 0)) {
6362 add = FALSE; /* added ipa to local table */
6363 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
6365 } else if (ipv4_buf[i] == ipa) {
6367 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
6368 __FUNCTION__, ipa, i));
6371 if (ipv4_buf[i] != 0) {
6372 /* add back host_ip entries from our local cache */
6373 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
6374 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
6375 __FUNCTION__, ipv4_buf[i], i));
6379 /* see the resulting hostip table */
6380 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6381 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
6382 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6387 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6388 * whenever there is an event related to an IP address.
6389 * ptr : kernel provided pointer to IP address that has changed
/*
 * dhd_inetaddr_notifier_call() - kernel inetaddr notifier callback, invoked
 * on IPv4 address add/remove events.  Filters out events for non-DHD
 * netdevs, resolves the DHD interface index, and keeps the dongle's
 * ARP-offload host-IP table in sync via aoe_update_host_ipv4_table().
 * If the bus is not yet in DHD_BUS_DATA state on an address-up event, the
 * address is parked in dhd->pend_ipaddr for later programming.
 * NOTE(review): listing truncated -- the switch(event)/NETDEV_UP/DOWN case
 * labels, some braces and the return statement are not visible here.
 */
6391 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
6392 unsigned long event,
6395 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
/* bail out early when ARP offload is globally disabled or event is bogus */
6401 if (!dhd_arp_enable)
6403 if (!ifa || !(ifa->ifa_dev->dev))
6406 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6407 /* Filter notifications meant for non Broadcom devices */
6408 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
6409 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
6410 #if defined(WL_ENABLE_P2P_IF)
6411 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
6412 #endif /* WL_ENABLE_P2P_IF */
6415 #endif /* LINUX_VERSION_CODE */
6417 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
6421 dhd_pub = &dhd->pub;
/* arp_version==1 firmware has no per-interface table -- idx stays 0 */
6423 if (dhd_pub->arp_version == 1) {
/* map the kernel netdev back to a DHD interface index */
6427 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
6428 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
6431 if (idx < DHD_MAX_IFS)
6432 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
6433 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
6435 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
/* address-up handling (case label not visible in this listing) */
6442 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
6443 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
6445 if (dhd->pub.busstate != DHD_BUS_DATA) {
6446 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
6447 if (dhd->pend_ipaddr) {
6448 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
6449 __FUNCTION__, dhd->pend_ipaddr));
6451 dhd->pend_ipaddr = ifa->ifa_address;
6455 #ifdef AOE_IP_ALIAS_SUPPORT
6456 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
6458 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
6459 #endif /* AOE_IP_ALIAS_SUPPORT */
/* address-down handling: forget pending address and purge offload tables */
6463 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
6464 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
6465 dhd->pend_ipaddr = 0;
6466 #ifdef AOE_IP_ALIAS_SUPPORT
6467 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
6469 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
6471 dhd_aoe_hostip_clr(&dhd->pub, idx);
6472 dhd_aoe_arp_clr(&dhd->pub, idx);
6473 #endif /* AOE_IP_ALIAS_SUPPORT */
6477 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
6478 __func__, ifa->ifa_label, event));
6483 #endif /* ARP_OFFLOAD_SUPPORT */
6486 /* Neighbor Discovery Offload: defered handler */
/*
 * dhd_inet6_work_handler() - deferred-work handler for IPv6 Neighbor
 * Discovery Offload (NDO).  Runs on the DHD deferred workqueue; validates
 * the work item, then enables NDO and programs/removes the IPv6 host
 * address in the dongle depending on ndo_work->event.
 * Only if_idx 0 (the primary interface) is serviced.
 * NOTE(review): listing truncated -- case labels (NETDEV_UP/DOWN), braces
 * and the final kfree(ndo_work) are not visible here.
 */
6488 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
6490 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
6491 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
/* defensive checks: wrong work type, missing payload, missing pub */
6494 if (event != DHD_WQ_WORK_IPV6_NDO) {
6495 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
6500 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
6505 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
/* NDO is handled for the primary interface only (if_idx == 0) */
6509 if (ndo_work->if_idx) {
6510 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
6514 switch (ndo_work->event) {
6516 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
6517 ret = dhd_ndo_enable(pub, TRUE);
6519 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
6522 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
6524 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
6525 __FUNCTION__, ret));
/* address removed: clear the table entry then disable NDO */
6529 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
6530 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
6532 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
6533 __FUNCTION__, ret));
6537 ret = dhd_ndo_enable(pub, FALSE);
6539 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
6544 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
6555 * Neighbor Discovery Offload: Called when an interface
6556 * is assigned with ipv6 address.
6557 * Handles only primary interface
/*
 * dhd_inet6addr_notifier_call() - kernel inet6addr notifier callback,
 * invoked when an interface gains/loses an IPv6 address.  Handles only the
 * primary DHD interface; allocates an ipv6_work_info_t (GFP_ATOMIC, since
 * notifiers may run in atomic context) and defers the actual dongle
 * programming to dhd_inet6_work_handler() on the deferred workqueue.
 * NOTE(review): listing truncated -- early returns (NOTIFY_DONE) and the
 * final return are not visible here.
 */
6559 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
6560 unsigned long event,
6565 struct inet6_ifaddr *inet6_ifa = ptr;
6566 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
6567 struct ipv6_work_info_t *ndo_info;
6568 int idx = 0; /* REVISIT */
6570 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6571 /* Filter notifications meant for non Broadcom devices */
6572 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
6575 #endif /* LINUX_VERSION_CODE */
6577 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
/* only service the event when it targets iflist[0]'s netdev */
6581 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
6583 dhd_pub = &dhd->pub;
/* skip entirely if the firmware lacks the "ndoe" capability */
6584 if (!FW_SUPPORTED(dhd_pub, ndoe))
6587 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
6589 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
/* stash event details; ndo_info is freed by the work handler */
6593 ndo_info->event = event;
6594 ndo_info->if_idx = idx;
6595 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
6597 /* defer the work to thread as it may block kernel */
6598 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
6599 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
6602 #endif /* #ifdef CONFIG_IPV6 */
/*
 * dhd_register_if() - finish setup of interface ifidx's net_device and
 * register it with the kernel (register_netdev/register_netdevice,
 * depending on need_rtnl_lock).  Installs netdev ops (pre/post-2.6.31
 * variants), ethtool and wireless-extension handlers, chooses the MAC
 * address (dongle MAC, or the per-if MAC with the locally-administered bit
 * set for Android virtual interfaces), and for Android builds powers the
 * chip down again when firmware download on driverload is disabled.
 * NOTE(review): listing truncated -- several braces, the primary/virtual
 * if branch and the error-path/return statements are not visible here.
 */
6605 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
6607 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6609 struct net_device *net = NULL;
/* fallback MAC used when the dongle has not provided one */
6611 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
6613 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6615 ASSERT(dhd && dhd->iflist[ifidx]);
6616 ifp = dhd->iflist[ifidx];
6618 ASSERT(net && (ifp->idx == ifidx));
/* old kernels wire individual net_device callbacks; newer use netdev_ops */
6620 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
6622 net->get_stats = dhd_get_stats;
6623 net->do_ioctl = dhd_ioctl_entry;
6624 net->hard_start_xmit = dhd_start_xmit;
6625 net->set_mac_address = dhd_set_mac_address;
6626 net->set_multicast_list = dhd_set_multicast_list;
6627 net->open = net->stop = NULL;
6629 ASSERT(!net->netdev_ops);
6630 net->netdev_ops = &dhd_ops_virt;
6631 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
6633 /* Ok, link into the network layer... */
6636 * device functions for the primary interface only
6638 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
6639 net->open = dhd_open;
6640 net->stop = dhd_stop;
6642 net->netdev_ops = &dhd_ops_pri;
6643 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
/* prefer the MAC the dongle reported over the hard-coded fallback */
6644 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
6645 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
6648 * We have to use the primary MAC for virtual interfaces
6650 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
6652 * Android sets the locally administered bit to indicate that this is a
6653 * portable hotspot. This will not work in simultaneous AP/STA mode,
6654 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
6656 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
6658 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
6659 __func__, net->name));
6660 temp_addr[0] |= 0x02;
/* reserve room for the bus/protocol header in front of the 802.3 header */
6664 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
6665 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6666 net->ethtool_ops = &dhd_ethtool_ops;
6667 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6669 #if defined(WL_WIRELESS_EXT)
6670 #if WIRELESS_EXT < 19
6671 net->get_wireless_stats = dhd_get_wireless_stats;
6672 #endif /* WIRELESS_EXT < 19 */
6673 #if WIRELESS_EXT > 12
6674 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
6675 #endif /* WIRELESS_EXT > 12 */
6676 #endif /* defined(WL_WIRELESS_EXT) */
6678 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
6680 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
6683 printf("%s\n", dhd_version);
/* register_netdev takes rtnl itself; register_netdevice assumes caller holds it */
6686 err = register_netdev(net);
6688 err = register_netdevice(net);
6691 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
6697 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
6698 MAC2STRDBG(net->dev_addr));
6700 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
6701 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
6704 #if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
6705 KERNEL_VERSION(2, 6, 27))))
/* unblock the module-load path waiting for registration to complete */
6708 up(&dhd_registration_sem);
/* if fw is downloaded on ifup rather than driver load, power the chip off now */
6710 if (!dhd_download_fw_on_driverload) {
6711 dhd_net_bus_devreset(net, TRUE);
6713 dhd_net_bus_suspend(net);
6714 #endif /* BCMLXSDMMC */
6715 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
6718 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
/* error path: undo the ops assignment on old kernels */
6722 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
6725 net->netdev_ops = NULL;
/*
 * dhd_bus_detach() - stop the protocol and bus layers if the bus is still
 * up, and unregister the OOB interrupt handler when OOB_INTR_ONLY is set.
 * NOTE(review): listing truncated -- NULL checks and closing braces are not
 * visible here.
 */
6731 dhd_bus_detach(dhd_pub_t *dhdp)
6735 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6738 dhd = (dhd_info_t *)dhdp->info;
6742 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
6743 * calling stop again will cuase SD read/write errors.
6745 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6746 /* Stop the protocol module */
6747 dhd_prot_stop(&dhd->pub);
6749 /* Stop the bus module */
6750 dhd_bus_stop(dhd->pub.bus, TRUE);
6753 #if defined(OOB_INTR_ONLY)
6754 dhd_bus_oob_intr_unregister(dhdp);
/*
 * dhd_detach() - full driver teardown, gated on the DHD_ATTACH_STATE_*
 * bits so it can safely unwind a partially-failed dhd_attach().
 * Teardown order: bus/protocol detach -> notifier unregistration ->
 * early-suspend -> wireless-ext -> interfaces (virtual first, then
 * primary) -> watchdog timer -> kernel threads/tasklet -> cfg80211 ->
 * deferred workqueue -> logtrace buffers -> PNO -> PM notifier ->
 * wakelocks -> TCP-ACK suppression -> config.
 * NOTE(review): listing truncated -- several braces, #ifdef guards and
 * intermediate statements are not visible here.
 */
6761 void dhd_detach(dhd_pub_t *dhdp)
6764 unsigned long flags;
6765 int timer_valid = FALSE;
6770 dhd = (dhd_info_t *)dhdp->info;
6774 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
6777 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
6778 /* Give sufficient time for threads to start running in case
6779 * dhd_attach() has failed
6784 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
6785 #ifdef PCIE_FULL_DONGLE
6786 dhd_flow_rings_deinit(dhdp);
6788 dhd_bus_detach(dhdp);
6791 dhd_prot_detach(dhdp);
/* unhook the IPv4/IPv6 address notifiers registered at attach time */
6794 #ifdef ARP_OFFLOAD_SUPPORT
6795 if (dhd_inetaddr_notifier_registered) {
6796 dhd_inetaddr_notifier_registered = FALSE;
6797 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
6799 #endif /* ARP_OFFLOAD_SUPPORT */
6801 if (dhd_inet6addr_notifier_registered) {
6802 dhd_inet6addr_notifier_registered = FALSE;
6803 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
6807 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
6808 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
6809 if (dhd->early_suspend.suspend)
6810 unregister_early_suspend(&dhd->early_suspend);
6812 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
6814 #if defined(WL_WIRELESS_EXT)
6815 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
6816 /* Detatch and unlink in the iw */
6819 #endif /* defined(WL_WIRELESS_EXT) */
6821 /* delete all interfaces, start with virtual */
6822 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
6826 /* Cleanup virtual interfaces */
6827 dhd_net_if_lock_local(dhd);
6828 for (i = 1; i < DHD_MAX_IFS; i++) {
6830 dhd_remove_if(&dhd->pub, i, TRUE);
6832 dhd_net_if_unlock_local(dhd);
6834 /* delete primary interface 0 */
6835 ifp = dhd->iflist[0];
6838 if (ifp && ifp->net) {
6842 /* in unregister_netdev case, the interface gets freed by net->destructor
6843 * (which is set to free_netdev)
6845 if (ifp->net->reg_state == NETREG_UNINITIALIZED)
6846 free_netdev(ifp->net);
6848 unregister_netdev(ifp->net);
6851 dhd_wmf_cleanup(dhdp, 0);
6852 #endif /* DHD_WMF */
6854 dhd_if_del_sta_list(ifp);
6856 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
6857 dhd->iflist[0] = NULL;
/* invalidate the watchdog under the general lock, then sync-delete it */
6861 /* Clear the watchdog timer */
6862 DHD_GENERAL_LOCK(&dhd->pub, flags);
6863 timer_valid = dhd->wd_timer_valid;
6864 dhd->wd_timer_valid = FALSE;
6865 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6867 del_timer_sync(&dhd->timer);
/* stop watchdog/rxf/dpc kernel threads, else kill the dpc tasklet */
6869 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
6870 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
6871 PROC_STOP(&dhd->thr_wdt_ctl);
6874 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
6875 PROC_STOP(&dhd->thr_rxf_ctl);
6878 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6879 PROC_STOP(&dhd->thr_dpc_ctl);
6881 tasklet_kill(&dhd->tasklet);
6884 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
6885 wl_cfg80211_detach(NULL);
6886 dhd_monitor_uninit();
6889 /* free deferred work queue */
6890 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
6891 dhd->dhd_deferred_wq = NULL;
6893 #ifdef SHOW_LOGTRACE
6894 if (dhd->event_data.fmts)
6895 kfree(dhd->event_data.fmts);
6896 if (dhd->event_data.raw_fmts)
6897 kfree(dhd->event_data.raw_fmts);
6898 #endif /* SHOW_LOGTRACE */
6901 if (dhdp->pno_state)
6902 dhd_pno_deinit(dhdp);
6904 #if defined(CONFIG_PM_SLEEP)
6905 if (dhd_pm_notifier_registered) {
6906 unregister_pm_notifier(&dhd_pm_notifier);
6907 dhd_pm_notifier_registered = FALSE;
6909 #endif /* CONFIG_PM_SLEEP */
6910 #ifdef DEBUG_CPU_FREQ
6912 free_percpu(dhd->new_freq);
6913 dhd->new_freq = NULL;
6914 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
/* reset wakelock bookkeeping before destroying the wakelocks */
6916 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
6917 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
6918 #ifdef CONFIG_HAS_WAKELOCK
6919 dhd->wakelock_counter = 0;
6920 dhd->wakelock_wd_counter = 0;
6921 dhd->wakelock_rx_timeout_enable = 0;
6922 dhd->wakelock_ctrl_timeout_enable = 0;
6923 wake_lock_destroy(&dhd->wl_wifi);
6924 wake_lock_destroy(&dhd->wl_rxwake);
6925 wake_lock_destroy(&dhd->wl_ctrlwake);
6926 wake_lock_destroy(&dhd->wl_wdwake);
6927 #endif /* CONFIG_HAS_WAKELOCK */
6932 #ifdef DHDTCPACK_SUPPRESS
6933 /* This will free all MEM allocated for TCPACK SUPPRESS */
6934 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6935 #endif /* DHDTCPACK_SUPPRESS */
6936 dhd_conf_detach(dhdp);
/*
 * dhd_free() - release remaining dhd_pub allocations: per-flow reorder
 * buffers, the station pool, and finally the dhd_info_t itself -- unless
 * that structure came from the platform preallocation pool, in which case
 * MFREE must be skipped.
 * NOTE(review): listing truncated -- NULL checks, braces and #ifdef guards
 * around the reorder/sta-pool sections are not visible here.
 */
6941 dhd_free(dhd_pub_t *dhdp)
6944 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6948 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
6949 if (dhdp->reorder_bufs[i]) {
6950 reorder_info_t *ptr;
6951 uint32 buf_size = sizeof(struct reorder_info);
6953 ptr = dhdp->reorder_bufs[i];
/* the buffer holds the header plus max_idx+1 packet pointers */
6955 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
6956 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
6957 i, ptr->max_idx, buf_size));
6959 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
6960 dhdp->reorder_bufs[i] = NULL;
6964 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
6966 dhd = (dhd_info_t *)dhdp->info;
6967 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
6969 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
6970 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
/*
 * dhd_module_cleanup() - module-unload path: unregister the bus and the
 * wifi platform driver.
 * NOTE(review): listing truncated around line 6980-6984.
 */
6976 dhd_module_cleanup(void)
6978 printk("%s: Enter\n", __FUNCTION__);
6980 dhd_bus_unregister();
6984 dhd_wifi_platform_unregister_drv();
6985 printk("%s: Exit\n", __FUNCTION__);
/*
 * dhd_module_exit() - module exit hook: run cleanup and drop the reboot
 * notifier registered in dhd_module_init().
 */
6989 dhd_module_exit(void)
6991 dhd_module_cleanup();
6992 unregister_reboot_notifier(&dhd_reboot_notifier);
/*
 * dhd_module_init() - module init hook.  Backs up the module-parameter
 * firmware/nvram paths (the driver may rewrite them), registers the wifi
 * platform driver with up to POWERUP_MAX_RETRY attempts -- restoring the
 * backed-up paths before each retry -- and registers a reboot notifier on
 * success.
 * NOTE(review): listing truncated -- the retry loop construct and the
 * final return are not visible here.
 */
6996 dhd_module_init(void)
6999 int retry = POWERUP_MAX_RETRY;
7001 printk("%s: in\n", __FUNCTION__);
7003 DHD_PERIM_RADIO_INIT();
/* snapshot module-param paths; strncpy does not guarantee termination,
 * hence the explicit NUL at MOD_PARAM_PATHLEN-1 */
7005 if (firmware_path[0] != '\0') {
7006 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
7007 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7010 if (nvram_path[0] != '\0') {
7011 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
7012 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7016 err = dhd_wifi_platform_register_drv();
7018 register_reboot_notifier(&dhd_reboot_notifier);
/* on failure: restore the original paths and retry */
7022 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
7023 __FUNCTION__, retry));
7024 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
7025 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
7026 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
7027 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
7032 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
7034 printk("%s: Exit err=%d\n", __FUNCTION__, err);
/*
 * dhd_reboot_callback() - reboot-notifier hook; reacts to SYS_RESTART.
 * NOTE(review): listing truncated -- the body of the SYS_RESTART branch
 * and the return value are not visible here.
 */
7039 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
7041 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
7042 if (code == SYS_RESTART) {
7048 extern char WIFI_MODULE_NAME[];
7049 extern char RKWIFI_DRV_VERSION[];
/*
 * rockchip_wifi_init_module_rkwifi() - Rockchip platform entry point:
 * prints a load banner and delegates to dhd_module_init().
 */
7051 int rockchip_wifi_init_module_rkwifi(void)
7053 printk("=======================================================\n");
7054 printk("==== Launching Wi-Fi driver! (Powered by Rockchip) ====\n");
7055 printk("=======================================================\n");
7056 printk("%s WiFi driver (Powered by Rockchip,Ver %s) init.\n", WIFI_MODULE_NAME, RKWIFI_DRV_VERSION);
7058 return dhd_module_init();
/*
 * rockchip_wifi_exit_module_rkwifi() - Rockchip platform exit point: prints
 * an unload banner.
 * NOTE(review): listing truncated -- the dhd_module_exit() call presumably
 * present after the banner is not visible here.
 */
7061 void rockchip_wifi_exit_module_rkwifi(void)
7063 printk("=======================================================\n");
7064 printk("== Dis-launching Wi-Fi driver! (Powered by Rockchip) ==\n");
7065 printk("=======================================================\n");
7069 EXPORT_SYMBOL(rockchip_wifi_init_module_rkwifi);
7070 EXPORT_SYMBOL(rockchip_wifi_exit_module_rkwifi);
7071 //#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7072 //#if defined(CONFIG_DEFERRED_INITCALLS)
7073 //deferred_module_init(dhd_module_init);
7074 //#elif defined(USE_LATE_INITCALL_SYNC)
7075 //late_initcall_sync(dhd_module_init);
7077 //late_initcall(dhd_module_init);
7078 //#endif /* USE_LATE_INITCALL_SYNC */
7080 //module_init(dhd_module_init);
7081 //#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7083 //module_exit(dhd_module_exit);
7086 * OS specific functions required to implement DHD driver in OS independent way
/*
 * dhd_os_proto_block() - acquire the protocol semaphore, dropping the
 * perimeter lock around the potentially-sleeping down() call.
 */
7089 dhd_os_proto_block(dhd_pub_t *pub)
7091 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7094 DHD_PERIM_UNLOCK(pub);
7096 down(&dhd->proto_sem);
7098 DHD_PERIM_LOCK(pub);
/*
 * dhd_os_proto_unblock() - release the protocol semaphore taken by
 * dhd_os_proto_block().
 */
7106 dhd_os_proto_unblock(dhd_pub_t *pub)
7108 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7111 up(&dhd->proto_sem);
/* dhd_os_get_ioctl_resp_timeout() - current ioctl-response timeout (ms). */
7119 dhd_os_get_ioctl_resp_timeout(void)
7121 return ((unsigned int)dhd_ioctl_timeout_msec);
/* dhd_os_set_ioctl_resp_timeout() - set the ioctl-response timeout (ms). */
7125 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
7127 dhd_ioctl_timeout_msec = (int)timeout_msec;
/*
 * dhd_os_ioctl_resp_wait() - block until *condition becomes true or the
 * ioctl timeout elapses, releasing the perimeter lock while sleeping on
 * the ioctl_resp_wait queue.  Returns the wait_event_timeout() result
 * (0 on timeout).
 */
7131 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
7133 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7136 /* Convert timeout in millsecond to jiffies */
7137 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7138 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
7140 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
7143 DHD_PERIM_UNLOCK(pub);
7145 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
7147 DHD_PERIM_LOCK(pub);
/* dhd_os_ioctl_resp_wake() - wake waiters in dhd_os_ioctl_resp_wait(). */
7153 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
7155 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7157 wake_up(&dhd->ioctl_resp_wait);
/*
 * dhd_os_wd_timer_extend() - switch the watchdog period between the
 * extended interval (extend==TRUE) and the normal default interval.
 */
7162 dhd_os_wd_timer_extend(void *bus, bool extend)
7164 dhd_pub_t *pub = bus;
7165 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7168 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
7170 dhd_os_wd_timer(bus, dhd->default_wd_interval);
/*
 * dhd_os_wd_timer() - arm (wdtick != 0) or stop (wdtick == 0) the driver
 * watchdog timer.  Timer state changes happen under the general spinlock;
 * del_timer_sync() is called only after dropping it to avoid deadlock with
 * the timer callback.  A watchdog wakelock is held while the timer is armed.
 */
7175 dhd_os_wd_timer(void *bus, uint wdtick)
7177 dhd_pub_t *pub = bus;
7178 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7179 unsigned long flags;
7181 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7184 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
7188 DHD_GENERAL_LOCK(pub, flags);
7190 /* don't start the wd until fw is loaded */
7191 if (pub->busstate == DHD_BUS_DOWN) {
7192 DHD_GENERAL_UNLOCK(pub, flags);
7194 DHD_OS_WD_WAKE_UNLOCK(pub);
7198 /* Totally stop the timer */
7199 if (!wdtick && dhd->wd_timer_valid == TRUE) {
7200 dhd->wd_timer_valid = FALSE;
7201 DHD_GENERAL_UNLOCK(pub, flags);
/* must not hold the spinlock here: del_timer_sync may wait for the handler */
7202 del_timer_sync(&dhd->timer);
7203 DHD_OS_WD_WAKE_UNLOCK(pub);
/* (re)arm path: take the wd wakelock and schedule the next tick */
7208 DHD_OS_WD_WAKE_LOCK(pub);
7209 dhd_watchdog_ms = (uint)wdtick;
7210 /* Re arm the timer, at last watchdog period */
7211 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
7212 dhd->wd_timer_valid = TRUE;
7214 DHD_GENERAL_UNLOCK(pub, flags);
/*
 * dhd_os_open_image() - open a firmware/nvram image file read-only via
 * filp_open(); the struct file pointer is used as an opaque image handle.
 * NOTE(review): listing truncated -- the error check on fp and the return
 * are not visible here.
 */
7218 dhd_os_open_image(char *filename)
7222 fp = filp_open(filename, O_RDONLY, 0);
7224 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7226 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
/*
 * dhd_os_get_image_block() - read up to len bytes from the image handle at
 * its current file position using kernel_read().
 * NOTE(review): listing truncated -- the f_pos update and return of rdlen
 * are not visible here.
 */
7236 dhd_os_get_image_block(char *buf, int len, void *image)
7238 struct file *fp = (struct file *)image;
7244 rdlen = kernel_read(fp, fp->f_pos, buf, len);
/* dhd_os_close_image() - close the image handle opened by dhd_os_open_image(). */
7252 dhd_os_close_image(void *image)
7255 filp_close((struct file *)image, NULL);
/*
 * dhd_os_sdlock() - serialize SD bus access.  When the DPC runs at
 * real-time priority (dhd_dpc_prio >= 0) a different primitive is taken
 * (branch body not visible in this truncated listing); otherwise a
 * bottom-half spinlock is used.
 */
7259 dhd_os_sdlock(dhd_pub_t *pub)
7263 dhd = (dhd_info_t *)(pub->info);
7265 if (dhd_dpc_prio >= 0)
7268 spin_lock_bh(&dhd->sdlock);
/* dhd_os_sdunlock() - counterpart of dhd_os_sdlock(); releases the SD lock
 * (mutex-style path for RT DPC not visible in this truncated listing). */
7272 dhd_os_sdunlock(dhd_pub_t *pub)
7276 dhd = (dhd_info_t *)(pub->info);
7278 if (dhd_dpc_prio >= 0)
7281 spin_unlock_bh(&dhd->sdlock);
/* dhd_os_sdlock_txq() - take the TX-queue bottom-half spinlock. */
7285 dhd_os_sdlock_txq(dhd_pub_t *pub)
7289 dhd = (dhd_info_t *)(pub->info);
7290 spin_lock_bh(&dhd->txqlock);
/* dhd_os_sdunlock_txq() - release the TX-queue bottom-half spinlock. */
7294 dhd_os_sdunlock_txq(dhd_pub_t *pub)
7298 dhd = (dhd_info_t *)(pub->info);
7299 spin_unlock_bh(&dhd->txqlock);
/* dhd_os_sdlock_rxq()/dhd_os_sdunlock_rxq() - RX-queue lock hooks; bodies
 * are empty or not visible in this truncated listing. */
7303 dhd_os_sdlock_rxq(dhd_pub_t *pub)
7308 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
/* dhd_os_rxflock() - take the RX-frame bottom-half spinlock. */
7313 dhd_os_rxflock(dhd_pub_t *pub)
7317 dhd = (dhd_info_t *)(pub->info);
7318 spin_lock_bh(&dhd->rxf_lock);
/* dhd_os_rxfunlock() - release the RX-frame bottom-half spinlock. */
7323 dhd_os_rxfunlock(dhd_pub_t *pub)
7327 dhd = (dhd_info_t *)(pub->info);
7328 spin_unlock_bh(&dhd->rxf_lock);
7331 #ifdef DHDTCPACK_SUPPRESS
/* dhd_os_tcpacklock() - take the TCP-ACK-suppression bottom-half spinlock. */
7333 dhd_os_tcpacklock(dhd_pub_t *pub)
7337 dhd = (dhd_info_t *)(pub->info);
7338 spin_lock_bh(&dhd->tcpack_lock);
/* dhd_os_tcpackunlock() - release the TCP-ACK-suppression spinlock. */
7343 dhd_os_tcpackunlock(dhd_pub_t *pub)
7347 dhd = (dhd_info_t *)(pub->info);
7348 spin_unlock_bh(&dhd->tcpack_lock);
7350 #endif /* DHDTCPACK_SUPPRESS */
/*
 * dhd_os_prealloc() - fetch a buffer from the platform preallocation pool;
 * if the pool has none and kmalloc_if_fail is set, fall back to kmalloc
 * (GFP_KERNEL when sleeping is allowed, else GFP_ATOMIC).
 * NOTE(review): listing truncated -- the return of buf is not visible here.
 */
7352 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
7355 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
7357 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
7358 if (buf == NULL && kmalloc_if_fail)
7359 buf = kmalloc(size, flags);
/* dhd_os_prefree() - counterpart of dhd_os_prealloc(); body not visible in
 * this truncated listing (preallocated buffers are typically not freed). */
7364 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
7368 #if defined(WL_WIRELESS_EXT)
/*
 * dhd_get_wireless_stats() - wireless-extensions stats callback; fills and
 * returns the cached iw statistics via wl_iw_get_wireless_stats().
 * NOTE(review): listing truncated -- the error path (res checks) is not
 * visible here.
 */
7369 struct iw_statistics *
7370 dhd_get_wireless_stats(struct net_device *dev)
7373 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7379 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
7382 return &dhd->iw.wstats;
7386 #endif /* defined(WL_WIRELESS_EXT) */
/*
 * dhd_wl_host_event() - decode a firmware event packet via wl_host_event()
 * (passing the logtrace event_data when SHOW_LOGTRACE is built in) and fan
 * the decoded event out to wireless-extensions (primary bsscfg only) and
 * cfg80211 handlers on the event's interface.
 * NOTE(review): listing truncated -- declaration of bcmerror, some braces
 * and the return are not visible here.
 */
7389 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
7390 wl_event_msg_t *event, void **data)
7393 ASSERT(dhd != NULL);
7395 #ifdef SHOW_LOGTRACE
7396 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
7398 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
7399 #endif /* SHOW_LOGTRACE */
/* stop here if the event could not be parsed */
7401 if (bcmerror != BCME_OK)
7404 #if defined(WL_WIRELESS_EXT)
7405 if (event->bsscfgidx == 0) {
7407 * Wireless ext is on primary interface only
7410 ASSERT(dhd->iflist[*ifidx] != NULL);
7411 ASSERT(dhd->iflist[*ifidx]->net != NULL);
7413 if (dhd->iflist[*ifidx]->net) {
7414 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
7417 #endif /* defined(WL_WIRELESS_EXT) */
/* cfg80211 gets every event regardless of bsscfg index */
7420 ASSERT(dhd->iflist[*ifidx] != NULL);
7421 ASSERT(dhd->iflist[*ifidx]->net != NULL);
7422 if (dhd->iflist[*ifidx]->net)
7423 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
7424 #endif /* defined(WL_CFG80211) */
7429 /* send up locally generated event */
/*
 * dhd_sendup_event() - inject a locally generated event into the network
 * stack.  For WLC_E_BTA_HCI_EVENT (BT-AMP, WLBTAMP builds) it synthesizes
 * a full bcm_event_t frame -- Ethernet header with ETHER_TYPE_BRCM, BCM
 * vendor-specific header, the wl_event_msg_t, the HCI payload and a
 * terminating 0x00 0x00 -- and delivers it up via netif_rx/netif_rx_ni.
 * NOTE(review): listing truncated -- declarations (len, pktlen, ifidx, msg,
 * ptr, dhd), the netif_rx calls at 7517+ and several braces are not
 * visible here.
 */
7431 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
7433 switch (ntoh32(event->event_type)) {
7435 /* Send up locally generated AMP HCI Events */
7436 case WLC_E_BTA_HCI_EVENT: {
7437 struct sk_buff *p, *skb;
7439 wl_event_msg_t *p_bcm_event;
/* total frame = bcm_event_t header + payload + 2 trailing zero bytes */
7448 len = ntoh32(event->datalen);
7449 pktlen = sizeof(bcm_event_t) + len + 2;
7451 ifidx = dhd_ifname2idx(dhd, event->ifname);
7453 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
7454 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
7456 msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
/* address the frame to ourselves; source MAC gets the local bit toggled */
7458 bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
7459 bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
7460 ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
7462 msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
7464 /* BCM Vendor specific header... */
7465 msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
7466 msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
7467 bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
7469 /* vendor spec header length + pvt data length (private indication
7470 * hdr + actual message itself)
7472 msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
7473 BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
7474 msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
7476 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
7478 /* copy wl_event_msg_t into sk_buf */
7480 /* pointer to wl_event_msg_t in sk_buf */
7481 p_bcm_event = &msg->event;
7482 bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
7484 /* copy hci event into sk_buf */
7485 bcopy(data, (p_bcm_event + 1), len);
7487 msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
7488 ntoh16(msg->bcm_hdr.length));
7489 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
7491 ptr = (char *)(msg + 1);
7492 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
7493 * are no ethertypes which are following this
7498 skb = PKTTONATIVE(dhdp->osh, p);
/* fall back to the primary interface if ifidx did not resolve */
7502 ifp = dhd->iflist[ifidx];
7504 ifp = dhd->iflist[0];
7507 skb->dev = ifp->net;
7508 skb->protocol = eth_type_trans(skb, skb->dev);
7513 /* Strip header, count, deliver upward */
7514 skb_pull(skb, ETH_HLEN);
7516 /* Send the packet */
7517 if (in_interrupt()) {
7524 /* Could not allocate a sk_buf */
7525 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
7528 } /* case WLC_E_BTA_HCI_EVENT */
7529 #endif /* WLBTAMP */
7536 #ifdef LOG_INTO_TCPDUMP
/*
 * dhd_sendup_log() - wrap a raw log blob in a BRCM-ethertype Ethernet frame
 * and push it up the stack on wlan0 (falling back to iflist[0]) so it can
 * be captured with tcpdump.  Built only with LOG_INTO_TCPDUMP.
 * NOTE(review): listing truncated -- declarations (skb_data, ifidx, ifp,
 * dhd), the netif_rx delivery at 7581+ and several braces are not visible
 * here.
 */
7538 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
7540 struct sk_buff *p, *skb;
7547 struct ether_header eth;
7549 pktlen = sizeof(eth) + data_len;
7552 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
7553 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
/* self-addressed frame; toggle the local bit on the source MAC */
7555 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
7556 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
7557 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
7558 eth.ether_type = hton16(ETHER_TYPE_BRCM);
7560 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
7561 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
7562 skb = PKTTONATIVE(dhdp->osh, p);
7563 skb_data = skb->data;
7566 ifidx = dhd_ifname2idx(dhd, "wlan0");
7567 ifp = dhd->iflist[ifidx];
7569 ifp = dhd->iflist[0];
7572 skb->dev = ifp->net;
/* eth_type_trans advances skb->data; restore it so the header stays */
7573 skb->protocol = eth_type_trans(skb, skb->dev);
7574 skb->data = skb_data;
7577 /* Strip header, count, deliver upward */
7578 skb_pull(skb, ETH_HLEN);
7580 /* Send the packet */
7581 if (in_interrupt()) {
7588 /* Could not allocate a sk_buf */
7589 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
7592 #endif /* LOG_INTO_TCPDUMP */
/*
 * dhd_wait_for_event() - block (up to IOCTL_RESP_TIMEOUT) until *lockvar
 * is cleared by the event/ioctl completion path. SDIO-only; drops the SDIO
 * lock before sleeping so the completion path can make progress.
 * (Excerpt: some original lines, e.g. braces and the sdlock reacquire, are
 * elided; numeric prefixes are the original file's line numbers.)
 */
7594 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
7596 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
7597 struct dhd_info *dhdinfo = dhd->info;
/* msecs_to_jiffies() only exists on >= 2.6.27; older kernels compute
 * jiffies from HZ by hand.
 */
7599 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7600 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
7602 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
7603 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
/* Release the bus lock while sleeping to avoid deadlocking the waker. */
7605 dhd_os_sdunlock(dhd);
7606 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
7608 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
/*
 * dhd_wait_event_wakeup() - wake any thread parked in dhd_wait_for_event().
 */
7612 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
7614 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
7615 struct dhd_info *dhdinfo = dhd->info;
7616 if (waitqueue_active(&dhdinfo->ctrl_wait))
7617 wake_up(&dhdinfo->ctrl_wait);
7622 #if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * dhd_net_bus_devreset() - reset (flag!=0) or re-init (flag==0) the dongle.
 * Tears down firmware-dependent state (wlfc, PNO) before the reset and
 * refreshes the firmware/nvram paths on re-init.
 * (Excerpt: braces/return paths elided; prefixes are original line numbers.)
 */
7624 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
7627 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7630 /* Issue wl down command before resetting the chip */
7631 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
7632 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
/* Firmware-side flow control state is invalid across a reset. */
7634 #ifdef PROP_TXSTATUS
7635 if (dhd->pub.wlfc_enabled)
7636 dhd_wlfc_deinit(&dhd->pub);
7637 #endif /* PROP_TXSTATUS */
7639 if (dhd->pub.pno_state)
7640 dhd_pno_deinit(&dhd->pub);
/* Re-resolve fw/nvram/config paths and push them down to the bus layer
 * so a subsequent download uses the current files.
 */
7646 dhd_update_fw_nv_path(dhd);
7647 /* update firmware and nvram path to sdio bus */
7648 dhd_bus_update_fw_nv_path(dhd->pub.bus,
7649 dhd->fw_path, dhd->nv_path, dhd->conf_path);
7651 #endif /* BCMSDIO */
7653 ret = dhd_bus_devreset(&dhd->pub, flag);
7655 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
/* Thin wrappers forwarding bus suspend/resume to the bus layer. */
7664 dhd_net_bus_suspend(struct net_device *dev)
7666 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7667 return dhd_bus_suspend(&dhd->pub);
7671 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
7673 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7674 return dhd_bus_resume(&dhd->pub, stage);
7677 #endif /* BCMSDIO */
7678 #endif /* BCMSDIO || BCMPCIE */
/*
 * net_os_set_suspend_disable() - set the "ignore suspend" flag; returns the
 * previous value. (Excerpt: braces/return elided.)
 */
7680 int net_os_set_suspend_disable(struct net_device *dev, int val)
7682 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7686 ret = dhd->pub.suspend_disable_flag;
7687 dhd->pub.suspend_disable_flag = val;
/*
 * net_os_set_suspend() - enter/leave driver suspend mode. With early-suspend
 * support the dedicated dhd_set_suspend() path is used; otherwise the generic
 * helper. Also refreshes cfg80211's notion of the power mode.
 */
7692 int net_os_set_suspend(struct net_device *dev, int val, int force)
7695 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7698 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7699 ret = dhd_set_suspend(val, &dhd->pub);
7701 ret = dhd_suspend_resume_helper(dhd, val, force);
7704 wl_cfg80211_update_power_mode(dev);
/*
 * net_os_set_suspend_bcn_li_dtim() - set the beacon-listen-interval DTIM
 * multiplier applied while suspended.
 */
7710 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
7712 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7715 dhd->pub.suspend_bcn_li_dtim = val;
7720 #ifdef PKT_FILTER_SUPPORT
/*
 * net_os_rxfilter_add_remove() - install or remove one of the predefined
 * broadcast/multicast RX packet filters in the dongle.
 * (Excerpt: braces, 'break's and the per-case 'filter_id = 10x;' assignments
 * appear to be elided — the numeric line prefixes skip. Only comments added.)
 */
7721 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
7723 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7724 char *filterp = NULL;
/* In non-master mode the filter sense is inverted. */
7728 if (!dhd_master_mode)
7729 add_remove = !add_remove;
/* Unicast/mDNS slots are managed elsewhere; reject them here. */
7731 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
7732 (num == DHD_MDNS_FILTER_NUM))
7734 if (num >= dhd->pub.pktfilter_count)
/* Filter strings: "<id> <polarity> <type> <offset> <mask> <pattern>". */
7737 case DHD_BROADCAST_FILTER_NUM:
7738 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
7741 case DHD_MULTICAST4_FILTER_NUM:
7742 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
7745 case DHD_MULTICAST6_FILTER_NUM:
7746 filterp = "103 0 0 0 0xFFFF 0x3333";
7755 dhd->pub.pktfilter[num] = filterp;
7756 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
7757 } else { /* Delete filter */
7758 if (dhd->pub.pktfilter[num] != NULL) {
/* NOTE(review): 'filter_id' is presumably assigned in the elided switch
 * cases above (matching the leading id in each filter string) — confirm
 * against the full source.
 */
7759 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
7760 dhd->pub.pktfilter[num] = NULL;
/*
 * dhd_os_enable_packet_filter() - apply the packet-filter enable state,
 * but only while in early-suspend, and only re-enable if the user has not
 * disabled suspend handling.
 */
7766 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
7771 /* Packet filtering is set only if we still in early-suspend and
7772 * we need either to turn it ON or turn it OFF
7773 * We can always turn it OFF in case of early-suspend, but we turn it
7774 * back ON only if suspend_disable_flag was not set
7776 if (dhdp && dhdp->up) {
7777 if (dhdp->in_suspend) {
7778 if (!val || (val && !dhdp->suspend_disable_flag))
7779 dhd_enable_packet_filter(val, dhdp);
7785 /* function to enable/disable packet for Network device */
7786 int net_os_enable_packet_filter(struct net_device *dev, int val)
7788 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7790 return dhd_os_enable_packet_filter(&dhd->pub, val);
7792 #endif /* PKT_FILTER_SUPPORT */
/*
 * dhd_dev_init_ioctl() - run the post-init host/dongle synchronization
 * sequence for this interface. (Excerpt: braces/returns elided.)
 */
7795 dhd_dev_init_ioctl(struct net_device *dev)
7797 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7800 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
/*
 * The functions below are thin Linux net_device wrappers around the common
 * (OS-independent) PNO implementation: each resolves dhd_info from the
 * net_device and forwards to the dhd_pno_* counterpart.
 */
7808 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
7810 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
7812 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7814 return (dhd_pno_stop_for_ssid(&dhd->pub));
7816 /* Linux wrapper to call common dhd_pno_set_for_ssid */
7818 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
7819 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
7821 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7823 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
7824 pno_repeat, pno_freq_expo_max, channel_list, nchan));
7827 /* Linux wrapper to call common dhd_pno_enable */
7829 dhd_dev_pno_enable(struct net_device *dev, int enable)
7831 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7833 return (dhd_pno_enable(&dhd->pub, enable));
7836 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
7838 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
7839 struct dhd_pno_hotlist_params *hotlist_params)
7841 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7842 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
7844 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
7846 dhd_dev_pno_stop_for_batch(struct net_device *dev)
7848 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7849 return (dhd_pno_stop_for_batch(&dhd->pub));
7851 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
7853 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
7855 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7856 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
7858 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
7860 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
7862 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7863 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
7865 #endif /* PNO_SUPPORT */
7867 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
/*
 * dhd_hang_process() - deferred-work handler that reports a firmware hang
 * to user space (wext "HANG" event) and/or cfg80211.
 * (Excerpt: braces and guard checks elided.)
 */
7868 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
7871 struct net_device *dev;
7873 dhd = (dhd_info_t *)dhd_info;
7874 dev = dhd->iflist[0]->net;
7880 #if defined(WL_WIRELESS_EXT)
7881 wl_iw_send_priv_event(dev, "HANG");
7883 #if defined(WL_CFG80211)
7884 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
/*
 * dhd_os_send_hang_message() - schedule the hang report exactly once
 * (hang_was_sent latches so repeated failures don't re-queue work).
 */
7889 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
7893 if (!dhdp->hang_was_sent) {
7894 dhdp->hang_was_sent = 1;
7895 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
7896 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
/*
 * net_os_send_hang_message() - net_device entry point for hang reporting.
 * When hang_report is disabled (test mode) the hang is logged, suppressed,
 * and the bus is forced down to stop further traffic.
 */
7902 int net_os_send_hang_message(struct net_device *dev)
7904 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7908 /* Report FW problem when enabled */
7909 if (dhd->pub.hang_report) {
7910 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7911 ret = dhd_os_send_hang_message(&dhd->pub);
7913 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
7916 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
7918 /* Enforce bus down to stop any future traffic */
7919 dhd->pub.busstate = DHD_BUS_DOWN;
7924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
/* Forward power on/off (with settle delay) to the platform adapter layer. */
7927 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
7929 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7930 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
/* Translate an ISO country code into the platform-customized wl_country_t. */
7933 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
7934 wl_country_t *cspec)
7936 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7937 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
/*
 * dhd_bus_country_set() - cache the country spec and, if requested, refresh
 * the wiphy band information. Only valid while the interface is up.
 */
7939 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
7941 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7942 if (dhd && dhd->pub.up) {
7943 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
7945 wl_update_wiphybands(NULL, notify);
/* Band change: refresh wiphy bands while up. (Band ioctl lines elided.) */
7950 void dhd_bus_band_set(struct net_device *dev, uint band)
7952 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7953 if (dhd && dhd->pub.up) {
7955 wl_update_wiphybands(NULL, true);
/*
 * dhd_net_set_fw_path() - override the firmware path; an "apsta" substring
 * in the name marks the image as AP-capable via the ap_fw_loaded global.
 */
7960 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
7962 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7964 if (!fw || fw[0] == '\0')
/* Bounded copy + explicit NUL termination (strncpy does not guarantee it). */
7967 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
7968 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
7971 if (strstr(fw, "apsta") != NULL) {
7972 DHD_INFO(("GOT APSTA FIRMWARE\n"));
7973 ap_fw_loaded = TRUE;
7975 DHD_INFO(("GOT STA FIRMWARE\n"));
7976 ap_fw_loaded = FALSE;
/*
 * Net-interface mutex wrappers: serialize interface add/remove against
 * other net_device operations. Public entry points resolve dhd_info from
 * the net_device; the _local variants take dhd_info directly.
 * (Excerpt: braces elided; mutex calls are compiled in only on >= 2.6.25.)
 */
7982 void dhd_net_if_lock(struct net_device *dev)
7984 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7985 dhd_net_if_lock_local(dhd);
7988 void dhd_net_if_unlock(struct net_device *dev)
7990 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7991 dhd_net_if_unlock_local(dhd);
7994 static void dhd_net_if_lock_local(dhd_info_t *dhd)
7996 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
7998 mutex_lock(&dhd->dhd_net_if_mutex);
8002 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
8004 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8006 mutex_unlock(&dhd->dhd_net_if_mutex);
/* Suspend mutex: serializes the suspend/resume path. */
8010 static void dhd_suspend_lock(dhd_pub_t *pub)
8012 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8013 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8015 mutex_lock(&dhd->dhd_suspend_mutex);
8019 static void dhd_suspend_unlock(dhd_pub_t *pub)
8021 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8022 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8024 mutex_unlock(&dhd->dhd_suspend_mutex);
/* General-purpose driver spinlock, IRQ-safe (saves/restores flags). */
8028 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
8030 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8031 unsigned long flags = 0;
8034 spin_lock_irqsave(&dhd->dhd_lock, flags);
8039 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
8041 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8044 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
8047 /* Linux specific multipurpose spinlock API */
/* Allocate and initialize an opaque spinlock handle for bus/proto layers. */
8049 dhd_os_spin_lock_init(osl_t *osh)
8051 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8052 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8053 /* and this results in kernel asserts in internal builds */
8054 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
8056 spin_lock_init(lock);
8057 return ((void *)lock);
8060 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
8062 MFREE(osh, lock, sizeof(spinlock_t) + 4);
8065 dhd_os_spin_lock(void *lock)
8067 unsigned long flags = 0;
8070 spin_lock_irqsave((spinlock_t *)lock, flags);
8075 dhd_os_spin_unlock(void *lock, unsigned long flags)
8078 spin_unlock_irqrestore((spinlock_t *)lock, flags);
/* Number of 802.1X (EAPOL) frames still queued for transmission. */
8082 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
8084 return (atomic_read(&dhd->pend_8021x_cnt));
8087 #define MAX_WAIT_FOR_8021X_TX 100
/*
 * dhd_wait_pend8021x() - poll (10 ms sleep per iteration, up to
 * MAX_WAIT_FOR_8021X_TX tries) until all pending EAPOL frames have been
 * sent; on timeout the counter is force-cleared and an error is logged.
 * The perimeter lock is dropped around the sleep so TX can drain.
 * (Excerpt: loop braces and return elided.)
 */
8090 dhd_wait_pend8021x(struct net_device *dev)
8092 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8093 int timeout = msecs_to_jiffies(10);
8094 int ntimes = MAX_WAIT_FOR_8021X_TX;
8095 int pend = dhd_get_pend_8021x_cnt(dhd);
8097 while (ntimes && pend) {
8099 set_current_state(TASK_INTERRUPTIBLE);
8100 DHD_PERIM_UNLOCK(&dhd->pub);
8101 schedule_timeout(timeout);
8102 DHD_PERIM_LOCK(&dhd->pub);
8103 set_current_state(TASK_RUNNING);
8106 pend = dhd_get_pend_8021x_cnt(dhd);
/* Timed out: clear the stuck counter so later waiters don't block. */
8110 atomic_set(&dhd->pend_8021x_cnt, 0);
8111 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
/*
 * write_to_file() - debug helper: dump 'size' bytes of 'buf' to
 * /tmp/mem_dump from kernel context, then free the buffer.
 * Ownership: takes ownership of 'buf' and MFREEs it before returning.
 * (Excerpt: the set_fs(KERNEL_DS)/set_fs(old_fs) calls and error paths are
 * on elided lines; numeric prefixes are the original file's line numbers.)
 */
8118 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
8122 mm_segment_t old_fs;
8125 /* change to KERNEL_DS address limit */
8129 /* open file to write */
8130 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
8132 printf("%s: open file error\n", __FUNCTION__);
8137 /* Write buf to file */
/* Direct f_op->write with a kernel pointer — legal only under KERNEL_DS. */
8138 fp->f_op->write(fp, buf, size, &pos);
8141 /* free buf before return */
8142 MFREE(dhd->osh, buf, size);
8143 /* close file before return */
8145 filp_close(fp, current->files);
8146 /* restore previous address limit */
8151 #endif /* DHD_DEBUG */
/*
 * dhd_os_wake_lock_timeout() - arm timed wakelocks for RX and ctrl paths
 * using the pending timeout values, then clear those values. Returns the
 * larger of the two pending timeouts. All state is protected by
 * wakelock_spinlock. (Excerpt: braces/returns elided.)
 */
8153 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
8155 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8156 unsigned long flags;
8160 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8161 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
8162 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
8163 #ifdef CONFIG_HAS_WAKELOCK
8164 if (dhd->wakelock_rx_timeout_enable)
8165 wake_lock_timeout(&dhd->wl_rxwake,
8166 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
8167 if (dhd->wakelock_ctrl_timeout_enable)
8168 wake_lock_timeout(&dhd->wl_ctrlwake,
8169 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
/* Consume the pending timeouts: they are one-shot requests. */
8171 dhd->wakelock_rx_timeout_enable = 0;
8172 dhd->wakelock_ctrl_timeout_enable = 0;
8173 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8178 int net_os_wake_lock_timeout(struct net_device *dev)
8180 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8184 ret = dhd_os_wake_lock_timeout(&dhd->pub);
/*
 * Request (monotonically raise) the RX wakelock timeout to be applied at
 * the next dhd_os_wake_lock_timeout() call.
 */
8188 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
8190 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8191 unsigned long flags;
8194 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8195 if (val > dhd->wakelock_rx_timeout_enable)
8196 dhd->wakelock_rx_timeout_enable = val;
8197 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Same as above, for the control-path wakelock timeout. */
8202 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
8204 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8205 unsigned long flags;
8208 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8209 if (val > dhd->wakelock_ctrl_timeout_enable)
8210 dhd->wakelock_ctrl_timeout_enable = val;
8211 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Cancel a pending ctrl timeout and drop the ctrl wakelock if held. */
8216 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
8218 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8219 unsigned long flags;
8222 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8223 dhd->wakelock_ctrl_timeout_enable = 0;
8224 #ifdef CONFIG_HAS_WAKELOCK
8225 if (wake_lock_active(&dhd->wl_ctrlwake))
8226 wake_unlock(&dhd->wl_ctrlwake);
8228 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device-facing wrappers for the two timeout-enable calls above. */
8233 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
8235 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8239 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
8243 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
8245 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8249 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
/*
 * dhd_os_wake_lock() - reference-counted wakelock acquire. The real OS
 * wakelock (or SDIO PM stay-awake) is taken only on the 0 -> 1 transition
 * and only when wakelocks are not currently waived. Returns the new count.
 * (Excerpt: braces/returns elided; counter protected by wakelock_spinlock.)
 */
8253 int dhd_os_wake_lock(dhd_pub_t *pub)
8255 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8256 unsigned long flags;
8260 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8262 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
8263 #ifdef CONFIG_HAS_WAKELOCK
8264 wake_lock(&dhd->wl_wifi);
8265 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8266 dhd_bus_dev_pm_stay_awake(pub);
8269 dhd->wakelock_counter++;
8270 ret = dhd->wakelock_counter;
8271 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8276 int net_os_wake_lock(struct net_device *dev)
8278 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8282 ret = dhd_os_wake_lock(&dhd->pub);
/*
 * dhd_os_wake_unlock() - reference-counted release; drops the OS wakelock
 * on the 1 -> 0 transition (unless waived). Also flushes any pending timed
 * wakelock requests first via dhd_os_wake_lock_timeout().
 */
8286 int dhd_os_wake_unlock(dhd_pub_t *pub)
8288 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8289 unsigned long flags;
8292 dhd_os_wake_lock_timeout(pub);
8294 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8295 if (dhd->wakelock_counter > 0) {
8296 dhd->wakelock_counter--;
8297 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
8298 #ifdef CONFIG_HAS_WAKELOCK
8299 wake_unlock(&dhd->wl_wifi);
8300 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8301 dhd_bus_dev_pm_relax(pub);
8304 ret = dhd->wakelock_counter;
8306 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/*
 * dhd_os_check_wakelock() - report whether any driver wakelock is held, so
 * the SDIO host can veto system suspend.
 */
8311 int dhd_os_check_wakelock(dhd_pub_t *pub)
8313 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
8314 KERNEL_VERSION(2, 6, 36)))
8319 dhd = (dhd_info_t *)(pub->info);
8320 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
8322 #ifdef CONFIG_HAS_WAKELOCK
8323 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
8324 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
8325 (wake_lock_active(&dhd->wl_wdwake))))
8327 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8328 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
8333 int net_os_wake_unlock(struct net_device *dev)
8335 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8339 ret = dhd_os_wake_unlock(&dhd->pub);
/*
 * Watchdog wakelock: counted separately from the main wakelock; the OS
 * lock is taken once on first use and dropped (with the counter zeroed,
 * not decremented) on unlock.
 */
8343 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
8345 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8346 unsigned long flags;
8350 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8351 #ifdef CONFIG_HAS_WAKELOCK
8352 /* if wakelock_wd_counter was never used : lock it at once */
8353 if (!dhd->wakelock_wd_counter)
8354 wake_lock(&dhd->wl_wdwake);
8356 dhd->wakelock_wd_counter++;
8357 ret = dhd->wakelock_wd_counter;
8358 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8363 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
8365 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8366 unsigned long flags;
8370 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8371 if (dhd->wakelock_wd_counter) {
8372 dhd->wakelock_wd_counter = 0;
8373 #ifdef CONFIG_HAS_WAKELOCK
8374 wake_unlock(&dhd->wl_wdwake);
8377 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8382 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
8383 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
/*
 * dhd_os_wake_lock_waive() - temporarily suppress OS wakelock take/release
 * while suspend-path IOVARs run; records the counter at waive time so
 * restore can reconcile. (Excerpt: braces/returns elided.)
 */
8385 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
8387 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8388 unsigned long flags;
8392 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8393 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
8394 if (dhd->waive_wakelock == FALSE) {
8395 /* record current lock status */
8396 dhd->wakelock_before_waive = dhd->wakelock_counter;
8397 dhd->waive_wakelock = TRUE;
/* NOTE(review): returns wakelock_wd_counter, not wakelock_counter, despite
 * the header comment above — confirm this is intentional in the full source.
 */
8399 ret = dhd->wakelock_wd_counter;
8400 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/*
 * dhd_os_wake_lock_restore() - end a waive window and reconcile the OS
 * wakelock with any lock/unlock activity that happened while waived.
 */
8405 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
8407 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8408 unsigned long flags;
8414 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8415 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
8416 if (!dhd->waive_wakelock)
8419 dhd->waive_wakelock = FALSE;
8420 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
8421 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
8422 * the lock in between, do the same by calling wake_unlock or pm_relax
8424 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
8425 #ifdef CONFIG_HAS_WAKELOCK
8426 wake_lock(&dhd->wl_wifi);
8427 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8428 dhd_bus_dev_pm_stay_awake(&dhd->pub);
8430 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
8431 #ifdef CONFIG_HAS_WAKELOCK
8432 wake_unlock(&dhd->wl_wifi);
8433 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8434 dhd_bus_dev_pm_relax(&dhd->pub);
8437 dhd->wakelock_before_waive = 0;
8439 ret = dhd->wakelock_wd_counter;
8440 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Returns whether the dongle interface is up. (Body elided in excerpt.) */
8444 bool dhd_os_check_if_up(dhd_pub_t *pub)
8451 #if defined(BCMSDIO)
8452 /* function to collect firmware, chip id and chip version info */
/* Formats driver/firmware versions plus chip id/rev/package into the
 * global info_string and prints it. (Excerpt: braces elided.)
 */
8453 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
8457 i = snprintf(info_string, sizeof(info_string),
8458 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
8459 printf("%s\n", info_string);
8464 i = snprintf(&info_string[i], sizeof(info_string) - i,
8465 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
8466 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
8468 #endif /* defined(BCMSDIO) */
/*
 * dhd_ioctl_entry_local() - validated, locked path for issuing a wl ioctl
 * on behalf of a local caller: resolves ifidx, takes the wakelock and
 * perimeter lock, runs the ioctl, then checks for a firmware hang.
 */
8469 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
8473 dhd_info_t *dhd = NULL;
8475 if (!net || !DEV_PRIV(net)) {
8476 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
8480 dhd = DHD_DEV_INFO(net);
8484 ifidx = dhd_net2idx(dhd, net);
8485 if (ifidx == DHD_BAD_IF) {
8486 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
8490 DHD_OS_WAKE_LOCK(&dhd->pub);
8491 DHD_PERIM_LOCK(&dhd->pub);
8493 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
/* Inspect the ioctl result for hang indications and report if needed. */
8494 dhd_check_hang(net, &dhd->pub, ret);
8496 DHD_PERIM_UNLOCK(&dhd->pub);
8497 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Index-based wrapper around dhd_check_hang(); rejects bad ifidx. */
8502 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
8504 struct net_device *net;
8506 net = dhd_idx2net(dhdp, ifidx);
8508 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
8512 return dhd_check_hang(net, dhdp, ret);
8515 /* Return instance */
8516 int dhd_get_instance(dhd_pub_t *dhdp)
8518 return dhdp->info->unit;
8522 #ifdef PROP_TXSTATUS
/* Platform hooks for the wireless flow-control (wlfc) module; bodies are
 * empty/trivial on this platform (elided in this excerpt).
 */
8524 void dhd_wlfc_plat_init(void *dhd)
8529 void dhd_wlfc_plat_deinit(void *dhd)
8534 bool dhd_wlfc_skip_fc(void)
8538 #endif /* PROP_TXSTATUS */
8542 #include <linux/debugfs.h>
/* Register accessors implemented by the bus layer. */
8544 extern uint32 dhd_readregl(void *bp, uint32 addr);
8545 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
/* Single global debugfs context: dhd/mem exposes dongle register space. */
8547 typedef struct dhd_dbgfs {
8548 struct dentry *debugfs_dir;
8549 struct dentry *debugfs_mem;
8554 dhd_dbgfs_t g_dbgfs;
8557 dhd_dbg_state_open(struct inode *inode, struct file *file)
8559 file->private_data = inode->i_private;
/*
 * dhd_dbg_state_read() - read dongle registers through debugfs. Reads are
 * forced to 4-byte alignment.
 * NOTE(review): only a single 32-bit register is read and 4 bytes copied,
 * yet *ppos advances by 'count' — confirm intended in the full source.
 */
8564 dhd_dbg_state_read(struct file *file, char __user *ubuf,
8565 size_t count, loff_t *ppos)
8574 if (pos >= g_dbgfs.size || !count)
8576 if (count > g_dbgfs.size - pos)
8577 count = g_dbgfs.size - pos;
8579 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
8580 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
8582 ret = copy_to_user(ubuf, &tmp, 4);
8587 *ppos = pos + count;
/* Write path: one aligned 32-bit register write per call. */
8595 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
8603 if (pos >= g_dbgfs.size || !count)
8605 if (count > g_dbgfs.size - pos)
8606 count = g_dbgfs.size - pos;
8608 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
8612 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
8613 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
/* Custom llseek over the register window [0, g_dbgfs.size].
 * NOTE(review): the SEEK_END branch computes size - off — confirm sign
 * convention against the full source.
 */
8620 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
8629 pos = file->f_pos + off;
8632 pos = g_dbgfs.size - off;
8634 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
8637 static const struct file_operations dhd_dbg_state_ops = {
8638 .read = dhd_dbg_state_read,
8639 .write = dhd_debugfs_write,
8640 .open = dhd_dbg_state_open,
8641 .llseek = dhd_debugfs_lseek
/* Create the "mem" node once the "dhd" directory exists. */
8644 static void dhd_dbg_create(void)
8646 if (g_dbgfs.debugfs_dir) {
8647 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
8648 NULL, &dhd_dbg_state_ops);
/* Set up the debugfs directory; 0x20000000 covers the cores' register space. */
8652 void dhd_dbg_init(dhd_pub_t *dhdp)
8656 g_dbgfs.dhdp = dhdp;
8657 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
8659 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
8660 if (IS_ERR(g_dbgfs.debugfs_dir)) {
8661 err = PTR_ERR(g_dbgfs.debugfs_dir);
8662 g_dbgfs.debugfs_dir = NULL;
/* Tear down nodes (child first) and reset the global context. */
8671 void dhd_dbg_remove(void)
8673 debugfs_remove(g_dbgfs.debugfs_mem);
8674 debugfs_remove(g_dbgfs.debugfs_dir);
8676 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
8679 #endif /* ifdef BCMDBGFS */
/*
 * dhd_htsf_addtxts() - stamp outgoing packets with the host-translated TSF
 * so end-to-end latency can be measured. Only packets above HTSF_MINLEN
 * whose destination port falls in [tsport, tsport+20] are stamped.
 * (Excerpt: braces elided; fixed byte offsets assume a UDP/IPv4 frame —
 * TODO confirm against the full source.)
 */
8684 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
8686 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
8687 struct sk_buff *skb;
8689 uint16 dport = 0, oldmagic = 0xACAC;
8693 /* timestamp packet */
8695 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
8697 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
8698 /* memcpy(&proto, p1+26, 4); */
8699 memcpy(&dport, p1+40, 2);
8700 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
8701 dport = ntoh16(dport);
8704 /* timestamp only if icmp or udp iperf with port 5555 */
8705 /* if (proto == 17 && dport == tsport) { */
8706 if (dport >= tsport && dport <= tsport + 20) {
8708 skb = (struct sk_buff *) pktbuf;
8710 htsf = dhd_get_htsf(dhd, 0);
8711 memset(skb->data + 44, 0, 2); /* clear checksum */
8712 memcpy(skb->data+82, &oldmagic, 2);
8713 memcpy(skb->data+84, &htsf, 4);
/* Embed a full htsfts_t record (magic, prio, seq, cycle counter). */
8715 memset(&ts, 0, sizeof(htsfts_t));
8716 ts.magic = HTSFMAGIC;
8717 ts.prio = PKTPRIO(pktbuf);
8718 ts.seqnum = htsf_seqnum++;
8719 ts.c10 = get_cycles();
8721 ts.endmagic = HTSFENDMAGIC;
8723 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
/* Print a latency histogram: per-bin counts, then max / total / negatives. */
8727 static void dhd_dump_htsfhisto(histo_t *his, char *s)
8729 int pktcnt = 0, curval = 0, i;
8730 for (i = 0; i < (NUMBIN-2); i++) {
8732 printf("%d ", his->bin[i]);
8733 pktcnt += his->bin[i];
8735 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
8736 his->bin[NUMBIN-1], s);
/*
 * sorttobin() - bucket a latency sample. Special bins: NUMBIN-1 counts
 * negative samples, NUMBIN-2 tracks the max, NUMBIN-3 is the overflow bin.
 */
8740 void sorttobin(int value, histo_t *histo)
8745 histo->bin[NUMBIN-1]++;
8748 if (value > histo->bin[NUMBIN-2]) /* store the max value */
8749 histo->bin[NUMBIN-2] = value;
8751 for (i = 0; i < (NUMBIN-2); i++) {
8752 binval += 500; /* 500 ms bins */
8753 if (value <= binval) {
8758 histo->bin[NUMBIN-3]++;
/*
 * dhd_htsf_addrxts() - on receive, complete the timestamp record planted by
 * dhd_htsf_addtxts(): fill in rx-side TSF/cycles, compute the per-hop and
 * end-to-end deltas, histogram them, and track the worst-case packet.
 * (Excerpt: braces elided; fixed offsets assume the tx-side frame layout —
 * TODO confirm against the full source.)
 */
8762 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
8764 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
8765 struct sk_buff *skb;
8768 int d1, d2, d3, end2end;
8772 skb = PKTTONATIVE(dhdp->osh, pktbuf);
8773 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
8775 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
8776 memcpy(&old_magic, p1+78, 2);
8777 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
/* Record rx-side host TSF and cycle counter in the embedded record. */
8782 if (htsf_ts->magic == HTSFMAGIC) {
8783 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
8784 htsf_ts->cE0 = get_cycles();
8787 if (old_magic == 0xACAC) {
8790 htsf = dhd_get_htsf(dhd, 0);
8791 memcpy(skb->data+92, &htsf, sizeof(uint32));
/* Pull the four timestamps t1..t4 out of the packet into the ring. */
8793 memcpy(&ts[tsidx].t1, skb->data+80, 16);
8795 d1 = ts[tsidx].t2 - ts[tsidx].t1;
8796 d2 = ts[tsidx].t3 - ts[tsidx].t2;
8797 d3 = ts[tsidx].t4 - ts[tsidx].t3;
8798 end2end = ts[tsidx].t4 - ts[tsidx].t1;
8800 sorttobin(d1, &vi_d1);
8801 sorttobin(d2, &vi_d2);
8802 sorttobin(d3, &vi_d3);
8803 sorttobin(end2end, &vi_d4);
/* Remember the worst end-to-end latency seen and which packet hit it. */
8805 if (end2end > 0 && end2end > maxdelay) {
8807 maxdelaypktno = tspktcnt;
8808 memcpy(&maxdelayts, &ts[tsidx], 16);
8810 if (++tsidx >= TSMAX)
/*
 * dhd_get_htsf() - translate the free-running host cycle counter into the
 * dongle's TSF timebase using the calibrated coefficient from htsf_update().
 * Handles 32-bit wraparound of the cycle counter.
 */
8815 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
8817 uint32 htsf = 0, cur_cycle, delta, delta_us;
8818 uint32 factor, baseval, baseval2;
8824 if (cur_cycle > dhd->htsf.last_cycle)
8825 delta = cur_cycle - dhd->htsf.last_cycle;
8827 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
8832 if (dhd->htsf.coef) {
8833 /* times ten to get the first digit */
/* Fixed-point divide: coef.coefdec1coefdec2 is the cycles-per-us ratio. */
8834 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
8835 baseval = (delta*10)/factor;
8836 baseval2 = (delta*10)/(factor+1);
8837 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
8838 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
8841 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
/*
 * dhd_dump_latency() - print the ring of recorded timestamp quadruples,
 * find the sample with the largest end-to-end delta, and report the
 * overall worst-case packet. (Excerpt: braces elided.)
 */
8847 static void dhd_dump_latency(void)
8850 int d1, d2, d3, d4, d5;
8852 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
8853 for (i = 0; i < TSMAX; i++) {
8854 d1 = ts[i].t2 - ts[i].t1;
8855 d2 = ts[i].t3 - ts[i].t2;
8856 d3 = ts[i].t4 - ts[i].t3;
8857 d4 = ts[i].t4 - ts[i].t1;
/* d5 = end-to-end delta of the current maximum ('max' updated when beaten). */
8858 d5 = ts[max].t4-ts[max].t1;
8859 if (d4 > d5 && d4 > 0) {
8862 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
8863 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
8867 printf("current idx = %d \n", tsidx);
8869 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
8870 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
8871 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
8872 maxdelayts.t2 - maxdelayts.t1,
8873 maxdelayts.t3 - maxdelayts.t2,
8874 maxdelayts.t4 - maxdelayts.t3,
8875 maxdelayts.t4 - maxdelayts.t1);
/*
 * dhd_ioctl_htsf_get() - query the dongle's raw "tsf" iovar and print it
 * alongside the host-side estimate from dhd_get_htsf() for comparison.
 */
8880 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
8892 memset(&ioc, 0, sizeof(ioc));
8893 memset(&tsf_buf, 0, sizeof(tsf_buf));
8895 ioc.cmd = WLC_GET_VAR;
8897 ioc.len = (uint)sizeof(buf);
8900 strncpy(buf, "tsf", sizeof(buf) - 1);
8901 buf[sizeof(buf) - 1] = '\0';
/* Sample host estimate before and after the ioctl to bracket its latency. */
8902 s1 = dhd_get_htsf(dhd, 0);
8903 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
8905 DHD_ERROR(("%s: tsf is not supported by device\n",
8906 dhd_ifname(&dhd->pub, ifidx)));
8911 s2 = dhd_get_htsf(dhd, 0);
8913 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
8914 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
8915 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
8916 dhd->htsf.coefdec2, s2-tsf_buf.low);
8917 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
/*
 * htsf_update() - recalibrate the host-cycles-to-TSF conversion from a
 * fresh dongle TSF sample in 'data'. Computes cycles-per-us as a
 * fixed-point value (coef.dec1dec2) and stores the new anchor point
 * (last_cycle/last_tsf) for dhd_get_htsf().
 * (Excerpt: braces and some assignments elided.)
 */
8921 void htsf_update(dhd_info_t *dhd, void *data)
8923 static ulong cur_cycle = 0, prev_cycle = 0;
8924 uint32 htsf, tsf_delta = 0;
8925 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
8929 /* cycles_t in inlcude/mips/timex.h */
8933 prev_cycle = cur_cycle;
/* Cycle delta with 32-bit wraparound handling. */
8936 if (cur_cycle > prev_cycle)
8937 cyc_delta = cur_cycle - prev_cycle;
8941 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
8945 printf(" tsf update ata point er is null \n");
8947 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
8948 memcpy(&cur_tsf, data, sizeof(tsf_t));
8950 if (cur_tsf.low == 0) {
8951 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
/* TSF delta; only the high-word-increment wrap case is recoverable. */
8955 if (cur_tsf.low > prev_tsf.low)
8956 tsf_delta = (cur_tsf.low - prev_tsf.low);
8958 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
8959 cur_tsf.low, prev_tsf.low));
8960 if (cur_tsf.high > prev_tsf.high) {
8961 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
8962 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
8965 return; /* do not update */
/* Integer part plus two decimal digits of cycles-per-TSF-tick. */
8969 hfactor = cyc_delta / tsf_delta;
8970 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
8971 dec1 = tmp/tsf_delta;
8972 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
8973 tmp = (tmp - (dec1*tsf_delta))*10;
8974 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
8993 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
8994 dhd->htsf.coef = hfactor;
8995 dhd->htsf.last_cycle = cur_cycle;
8996 dhd->htsf.last_tsf = cur_tsf.low;
8997 dhd->htsf.coefdec1 = dec1;
8998 dhd->htsf.coefdec2 = dec2;
9001 htsf = prev_tsf.low;
9005 #endif /* WLMEDIA_HTSF */
9007 #ifdef CUSTOM_SET_CPUCORE
/*
 * dhd_set_cpucore() - pin the DPC and RXF kernel threads to dedicated CPU
 * cores (set!=0) or back to the primary core (set==0). Only done for VHT80
 * channels where throughput justifies it. Retries each affinity change up
 * to MAX_RETRY_SET_CPUCORE times. (Excerpt: braces/retry resets elided.)
 */
9008 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
9010 int e_dpc = 0, e_rxf = 0, retry_set = 0;
9012 if (!(dhd->chan_isvht80)) {
9013 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
/* Pin the DPC thread. */
9020 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9021 cpumask_of(DPC_CPUCORE));
9023 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9024 cpumask_of(PRIMARY_CPUCORE));
9026 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9027 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
9032 } while (e_dpc < 0);
/* Pin the RX-frame thread. */
9037 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9038 cpumask_of(RXF_CPUCORE));
9040 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9041 cpumask_of(PRIMARY_CPUCORE));
9043 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9044 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
9049 } while (e_rxf < 0);
9051 #ifdef DHD_OF_SUPPORT
9052 interrupt_set_cpucore(set);
9053 #endif /* DHD_OF_SUPPORT */
9054 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
9058 #endif /* CUSTOM_SET_CPUCORE */
9059 #if defined(DHD_TCP_WINSIZE_ADJUST)
/*
 * Linear search of the target_ports[] table for 'port'.
 * NOTE(review): presumably returns nonzero on a match and 0 otherwise —
 * the return statements are outside the shown lines; confirm.
 */
9060 static int dhd_port_list_match(int port)
9063 for (i = 0; i < MAX_TARGET_PORTS; i++) {
9064 if (target_ports[i] == port)
/*
 * Inflate the TCP receive-window advertised in a forwarded segment.
 *
 * Only active in host-AP mode (DHD_FLAG_HOSTAP_MODE). For a TCP segment
 * whose advertised window is below MIN_TCP_WIN_SIZE and whose
 * destination port is in target_ports[] (dhd_port_list_match), the
 * window is scaled by WIN_SIZE_SCALE_FACTOR and the TCP checksum is
 * patched incrementally (RFC 1624 style) instead of being recomputed.
 *
 * The skb data pointer is temporarily advanced past the IP header with
 * skb_pull() and restored with skb_push() before returning.
 *
 * NOTE(review): skb->data is assumed to point at the IP header on entry
 * — confirm against the caller.
 */
9069 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
9071 struct iphdr *ipheader;
9072 struct tcphdr *tcpheader;
9074 int32 incremental_checksum;
/* Window adjustment applies only when acting as an AP. */
9076 if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
9078 if (skb == NULL || skb->data == NULL)
9081 ipheader = (struct iphdr*)(skb->data);
9083 if (ipheader->protocol == IPPROTO_TCP) {
/* Skip the (variable-length) IP header: ihl is in 32-bit words. */
9084 tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
9086 win_size = ntoh16(tcpheader->window);
9087 if (win_size < MIN_TCP_WIN_SIZE &&
9088 dhd_port_list_match(ntoh16(tcpheader->dest))) {
/* Incremental checksum update for the changed 16-bit window field. */
9089 incremental_checksum = ntoh16(tcpheader->check);
9090 incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
9091 if (incremental_checksum < 0)
9092 --incremental_checksum;
9093 tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
9094 tcpheader->check = hton16((unsigned short)incremental_checksum);
/* Restore skb->data to the IP header for downstream consumers. */
9097 skb_push(skb, (ipheader->ihl)<<2);
9100 #endif /* DHD_TCP_WINSIZE_ADJUST */
9102 /* Get interface specific ap_isolate configuration */
/*
 * Returns the ap_isolate setting stored on interface 'idx'.
 * 'idx' must be < DHD_MAX_IFS (asserted).
 * NOTE(review): a NULL check on iflist[idx] is not visible in the shown
 * lines — confirm one exists between the lookup and the dereference.
 */
9103 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
9105 dhd_info_t *dhd = dhdp->info;
9108 ASSERT(idx < DHD_MAX_IFS);
9110 ifp = dhd->iflist[idx];
9112 return ifp->ap_isolate;
9115 /* Set interface specific ap_isolate configuration */
/*
 * Stores 'val' as the ap_isolate setting on interface 'idx'.
 * 'idx' must be < DHD_MAX_IFS (asserted). Counterpart of
 * dhd_get_ap_isolate().
 */
9116 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
9118 dhd_info_t *dhd = dhdp->info;
9121 ASSERT(idx < DHD_MAX_IFS);
9123 ifp = dhd->iflist[idx];
9125 ifp->ap_isolate = val;
9131 /* Returns interface specific WMF configuration */
/*
 * Looks up interface 'idx' (asserted < DHD_MAX_IFS) and returns its
 * Wireless Multicast Forwarding (WMF) configuration structure.
 */
9132 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
9134 dhd_info_t *dhd = dhdp->info;
9137 ASSERT(idx < DHD_MAX_IFS);
9139 ifp = dhd->iflist[idx];
9142 #endif /* DHD_WMF */
9145 #ifdef DHD_UNICAST_DHCP
/*
 * Parse the L2 header of 'pktbuf' and locate its EtherType and payload.
 *
 * Handles Ethernet II frames, SNAP-encapsulated 802.3 frames, and skips
 * one 802.1Q VLAN tag if present.
 *
 * Out-parameters:
 *   data_ptr - start of the L3 payload (just past the type field)
 *   len_ptr  - remaining length from *data_ptr to end of frame
 *   et_ptr   - EtherType (host byte order)
 *   snap_ptr - whether the frame was SNAP encapsulated
 *
 * NOTE(review): error returns for the short-frame / non-SNAP branches
 * are outside the shown lines; presumably nonzero on failure, 0 on
 * success — confirm.
 */
9147 dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
9148 uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
9150 uint8 *frame = PKTDATA(pub->osh, pktbuf);
9151 int length = PKTLEN(pub->osh, pktbuf);
9152 uint8 *pt; /* Pointer to type field */
9155 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
9156 if (length < ETHER_HDR_LEN) {
9157 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
9158 __FUNCTION__, length));
9160 } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
9161 /* Frame is Ethernet II */
9162 pt = frame + ETHER_TYPE_OFFSET;
9163 } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
9164 !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
/* 802.3 + LLC/SNAP: real type field sits after the SNAP header. */
9165 pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
9168 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
9173 ethertype = ntoh16_ua(pt);
9175 /* Skip VLAN tag, if any */
9176 if (ethertype == ETHER_TYPE_8021Q) {
/* Bounds check: the inner type field must lie within the frame. */
9179 if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
9180 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
9181 __FUNCTION__, length));
9185 ethertype = ntoh16_ua(pt);
9188 *data_ptr = pt + ETHER_TYPE_LEN;
9189 *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
9190 *et_ptr = ethertype;
/*
 * Parse the IPv4 header of 'pktbuf' and locate the transport payload.
 *
 * Uses dhd_get_pkt_ether_type() to find the L3 start, then validates:
 * IPv4 only, sane header length, total length consistent with the frame
 * (longer frames than tot_len are tolerated, truncated ones rejected),
 * and no fragmentation (neither MF set nor nonzero fragment offset).
 *
 * Out-parameters:
 *   data_ptr - start of the transport payload (past IP options)
 *   len_ptr  - payload length (IP length minus header length)
 *   prot_ptr - IP protocol number (IPV4_PROT field)
 *
 * NOTE(review): returns 0 on success and nonzero on the (elided) error
 * paths — confirm.
 */
9196 dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
9197 uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
9199 struct ipv4_hdr *iph; /* IP frame pointer */
9200 int iplen; /* IP frame length */
9201 uint16 ethertype, iphdrlen, ippktlen;
9206 if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
9207 &iplen, &ethertype, &snap) != 0)
9210 if (ethertype != ETHER_TYPE_IP) {
9214 /* We support IPv4 only */
9215 if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
9219 /* Header length sanity */
9220 iphdrlen = IPV4_HLEN(iph);
9223 * Packet length sanity; sometimes we receive eth-frame size bigger
9224 * than the IP content, which results in a bad tcp chksum
9226 ippktlen = ntoh16(iph->tot_len);
9227 if (ippktlen < iplen) {
/* Frame has trailing padding beyond tot_len; trust tot_len. */
9229 DHD_INFO(("%s: extra frame length ignored\n",
9232 } else if (ippktlen > iplen) {
9233 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
9234 __FUNCTION__, ippktlen - iplen));
9238 if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
9239 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
9240 __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
9245 * We don't handle fragmented IP packets. A first frag is indicated by the MF
9246 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
9248 iph_frag = ntoh16(iph->frag);
9250 if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
9251 DHD_INFO(("DHD:%s: IP fragment not handled\n",
9256 prot = IPV4_PROT(iph);
9258 *data_ptr = (((uint8 *)iph) + iphdrlen);
9259 *len_ptr = iplen - iphdrlen;
9264 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
/*
 * If 'pktbuf' is a broadcast/multicast DHCP reply (offer/ack) from
 * server to client (UDP dest port DHCP_PORT_CLIENT), rewrite the
 * Ethernet destination address to the client hardware address (chaddr)
 * taken from the DHCP payload, turning the frame into a unicast one.
 * Frames that fail any check are left untouched.
 * NOTE(review): the chaddr is first looked up via dhd_find_sta();
 * presumably the rewrite only happens for a known station — the
 * intervening line is not shown; confirm.
 */
9266 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
9269 uint8 *eh = PKTDATA(pub->osh, pktbuf);
/* Only broadcast/multicast frames need conversion. */
9278 if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
9280 if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
9282 if (prot != IP_PROT_UDP)
9284 /* check frame length, at least UDP_HDR_LEN */
9285 if (udpl < UDP_HDR_LEN) {
9286 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
9290 port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
9291 /* only process DHCP packets from server to client */
9292 if (port != DHCP_PORT_CLIENT)
9295 dhcp = udph + UDP_HDR_LEN;
9296 dhcpl = udpl - UDP_HDR_LEN;
/* Payload must at least reach the end of the chaddr field. */
9298 if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
9299 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
9303 /* only process DHCP reply(offer/ack) packets */
9304 if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
9306 chaddr = dhcp + DHCP_CHADDR_OFFSET;
9307 stainfo = dhd_find_sta(pub, ifidx, chaddr);
/* Rewrite the L2 destination to the DHCP client's MAC. */
9309 bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
9314 #endif /* DHD_UNICAST_DHCP */
9315 #ifdef DHD_L2_FILTER
9316 /* Check if packet type is ICMP ECHO */
/*
 * Inspect 'pktbuf' and detect an ICMP echo request (ping).
 * NOTE(review): the return statements are outside the shown lines;
 * presumably the function signals "block" for ECHO_REQUEST packets and
 * "pass" otherwise — confirm.
 */
9318 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
9320 struct bcmicmp_hdr *icmph;
9324 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
9326 if (prot == IP_PROT_ICMP) {
9327 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
9332 #endif /* DHD_L2_FILTER */
9334 void *dhd_get_pub(struct net_device *dev)
9336 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
9337 return (void *)&dhdinfo->pub;