---help---
Path to the calibration file.
-config BCMDHD_CONFIG_PATH
- depends on BCMDHD
- string "Config path"
- default "/system/etc/firmware/config.txt"
- ---help---
- Path to the driver configuration file.
-
config BCMDHD_WEXT
bool "Enable WEXT support"
depends on BCMDHD && CFG80211 = n
Enables WEXT support
choice
+ prompt "Enable Chip Interface"
depends on BCMDHD
+ ---help---
+ Enable Chip Interface.
+config BCMDHD_SDIO
+ bool "SDIO bus interface support"
+ depends on BCMDHD && MMC
+config BCMDHD_PCIE
+ bool "PCIe bus interface support"
+ depends on BCMDHD && PCI
+endchoice
+
+choice
+ depends on BCMDHD && BCMDHD_SDIO
prompt "Interrupt type"
---help---
- Interrupt type
+ Interrupt type
config BCMDHD_OOB
- depends on BCMDHD
+ depends on BCMDHD && BCMDHD_SDIO
bool "Out-of-Band Interrupt"
default y
---help---
- Interrupt from WL_HOST_WAKE.
+ Interrupt from WL_HOST_WAKE.
config BCMDHD_SDIO_IRQ
- depends on BCMDHD
+ depends on BCMDHD && BCMDHD_SDIO
bool "In-Band Interrupt"
---help---
Interrupt from SDIO DAT[1]
endchoice
-
CONFIG_CFG80211 = y
CONFIG_BCMDHD_OOB = y
+CONFIG_BCMDHD_SDIO := y
+#CONFIG_BCMDHD_PCIE := y
+
DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
- -DKEEP_ALIVE -DPKT_FILTER_SUPPORT \
- -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
- -DDHD_USE_IDLECOUNT -DSET_RANDOM_MAC_SOFTAP -DVSDB \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHD_USE_IDLECOUNT \
+ -DEMBEDDED_PLATFORM -DPNO_SUPPORT -DVSDB \
-DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST \
-DESCAN_RESULT_PATCH -DSUPPORT_PM2_ONLY -DWLTDLS \
-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \
- -DMIRACAST_AMPDU_SIZE=8 \
- -DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS -DPROP_TXSTATUS_VSDB \
- -DWL_SUPPORT_BACKPORTED_KPATCHES -DDHDTCPACK_SUPPRESS \
- -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \
+ -DMIRACAST_AMPDU_SIZE=8 -DDHDTCPACK_SUPPRESS \
+ -DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS_VSDB \
+ -DGET_OTP_MAC_ENABLE -DSET_RANDOM_MAC_SOFTAP \
+ -DENABLE_INSMOD_NO_FW_LOAD \
+ -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \
-Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include
-DHDCFLAGS += \
- -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \
- -DCUSTOM_SDIO_F2_BLKSIZE=128 -DUSE_SDIOFIFO_IOVAR
-
DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
hnd_pktq.o hnd_pktpool.o dhd_config.o
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+DHDCFLAGS += \
+ -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \
+ -DCUSTOM_SDIO_F2_BLKSIZE=128 -DUSE_SDIOFIFO_IOVAR -DBCMSDIOH_TXGLOM \
+ -DPROP_TXSTATUS
+
DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
dhd_sdio.o dhd_cdc.o dhd_wlfc.o
-$(MODULE_NAME)-y += $(DHDOFILES)
-
-DHDOFILES += dhd_gpio.o
-DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT -DGET_CUSTOM_MAC_ENABLE
-#DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
-
ifeq ($(CONFIG_BCMDHD_OOB),y)
DHDCFLAGS += -DOOB_INTR_ONLY -DHW_OOB -DCUSTOMER_OOB
ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
else
DHDCFLAGS += -DSDIO_ISR_THREAD
endif
+endif
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+DHDCFLAGS += \
+ -DPCIE_FULL_DONGLE -DBCMPCIE -DSHOW_LOGTRACE -DDPCIE_TX_DEFERRAL \
+ -DCUSTOM_DPC_PRIO_SETTING=-1
+
+DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
+ dhd_msgbuf.o
+endif
+
+$(MODULE_NAME)-y += $(DHDOFILES)
+
+#ifeq ($(CONFIG_MACH_ODROID_4210),y)
+DHDOFILES += dhd_gpio.o
+DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT -DGET_CUSTOM_MAC_ENABLE
+#DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
+#endif
DHDCFLAGS += -DBAND_AG
ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
-DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT
+# add dhd_static_buf to kernel image build
+#obj-y += dhd_static_buf.o
+DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
endif
ifneq ($(CONFIG_WIRELESS_EXT),)
DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
endif
ifneq ($(CONFIG_CFG80211),)
-bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o wl_cfg_btcoex.o
-#DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o
+bcmdhd-objs += dhd_cfg80211.o dhd_cfg_vendor.o
+DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
#DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
-DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7
DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
-endif
-ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),)
-DHDCFLAGS += -DWL_SCHED_SCAN
+DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
endif
EXTRA_CFLAGS = $(DHDCFLAGS)
ifeq ($(CONFIG_BCMDHD),m)
+DHDCFLAGS += -DMULTIPLE_SUPPLICANT
EXTRA_LDFLAGS += --strip-debug
+else
+DHDCFLAGS += -DBUILD_IN_KERNEL
endif
$(MODULE_NAME)-y += rkversion.o
obj-$(CONFIG_RKWIFI) += $(MODULE_NAME).o
-EXTRA_CFLAGS = $(DHDCFLAGS)
-ifeq ($(CONFIG_RKWIFI),m)
-EXTRA_LDFLAGS += --strip-debug
-endif
-
KERNEL_DIR = /home/duke/jb_4.2/kernel
all:
* bcmevent read-only data shared by kernel or app layers
*
* $Copyright Open Broadcom Corporation$
- * $Id: bcmevent.c 487838 2014-06-27 05:51:44Z $
+ * $Id: bcmevent.c 492377 2014-07-21 19:54:06Z $
*/
#include <typedefs.h>
BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
#endif
-#if 0 && (NDISVER >= 0x0620)
- BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND),
- BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND),
- BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED),
- BCMEVENT_NAME(WLC_E_AP_STARTED),
- BCMEVENT_NAME(WLC_E_DFS_AP_STOP),
- BCMEVENT_NAME(WLC_E_DFS_AP_RESUME),
- BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS),
- BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS),
- BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
- BCMEVENT_NAME(WLC_E_AUTH_REQ),
- BCMEVENT_NAME(WLC_E_IBSS_COALESCE),
-#endif
#ifdef BCMWAPI_WAI
BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
BCMEVENT_NAME(WLC_E_WAI_MSG),
SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
return -EBUSY;
}
+#ifdef HW_OOB
+ printf("%s: HW_OOB enabled\n", __FUNCTION__);
+#else
+ printf("%s: SW_OOB enabled\n", __FUNCTION__);
+#endif
SDLX_MSG(("%s OOB irq=%d flags=%X\n", __FUNCTION__,
(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+ bcmsdh_osinfo->oob_irq_enabled = TRUE;
+ bcmsdh_osinfo->oob_irq_registered = TRUE;
#if defined(CONFIG_ARCH_ODIN)
err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
#endif /* defined(CONFIG_ARCH_ODIN) */
if (err) {
+ bcmsdh_osinfo->oob_irq_enabled = FALSE;
+ bcmsdh_osinfo->oob_irq_registered = FALSE;
SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
return err;
}
else
bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
#endif
- bcmsdh_osinfo->oob_irq_enabled = TRUE;
- bcmsdh_osinfo->oob_irq_registered = TRUE;
return 0;
}
static void IRQHandlerF2(struct sdio_func *func);
#endif /* !defined(OOB_INTR_ONLY) */
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+#if defined(ENABLE_INSMOD_NO_FW_LOAD)
extern int sdio_reset_comm(struct mmc_card *card);
-
+#else
+int sdio_reset_comm(struct mmc_card *card)
+{
+ return 0;
+}
+#endif
+#ifdef GLOBAL_SDMMC_INSTANCE
extern PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
#define DEFAULT_SDIO_F2_BLKSIZE 512
#ifndef CUSTOM_SDIO_F2_BLKSIZE
sd->fake_func0.num = 0;
sd->fake_func0.card = func->card;
sd->func[0] = &sd->fake_func0;
+#ifdef GLOBAL_SDMMC_INSTANCE
if (func->num == 2)
sd->func[1] = gInstance->func[1];
+#else
+ sd->func[1] = func->card->sdio_func[0];
+#endif
sd->func[2] = func->card->sdio_func[1];
+#ifdef GLOBAL_SDMMC_INSTANCE
sd->func[func->num] = func;
+#endif
sd->num_funcs = 2;
sd->sd_blockmode = TRUE;
sd->use_client_ints = TRUE;
module_param(clockoverride, int, 0644);
MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+#ifdef GLOBAL_SDMMC_INSTANCE
PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
/* Maximum number of bcmsdh_sdmmc devices supported by driver */
#define BCMSDH_SDMMC_MAX_DEVICES 1
sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
return;
}
+ sd_err(("%s: Enter\n", __FUNCTION__));
osh = sdioh->osh;
bcmsdh_remove(sdioh->bcmsdh);
sd_info(("sdio_device: 0x%04x\n", func->device));
sd_info(("Function#: 0x%04x\n", func->num));
+#ifdef GLOBAL_SDMMC_INSTANCE
gInstance->func[func->num] = func;
+#endif
/* 4318 doesn't have function 2 */
if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
struct sdio_func *func = dev_to_sdio_func(pdev);
mmc_pm_flag_t sdio_flags;
- printk("%s Enter\n", __FUNCTION__);
+ printk("%s Enter func->num=%d\n", __FUNCTION__, func->num);
if (func->num != 2)
return 0;
#endif
struct sdio_func *func = dev_to_sdio_func(pdev);
- printk("%s Enter\n", __FUNCTION__);
+ printk("%s Enter func->num=%d\n", __FUNCTION__, func->num);
if (func->num != 2)
return 0;
#if defined(OOB_INTR_ONLY)
sdioh = sdio_get_drvdata(func);
bcmsdh_resume(sdioh->bcmsdh);
-#endif
+#endif
smp_mb();
printk("%s Exit\n", __FUNCTION__);
.pm = &bcmsdh_sdmmc_pm_ops,
},
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
- };
+};
struct sdos_info {
sdioh_info_t *sd;
*/
int bcmsdh_register_client_driver(void)
{
+#ifdef GLOBAL_SDMMC_INSTANCE
gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
if (!gInstance)
return -ENOMEM;
+#endif
return sdio_register_driver(&bcmsdh_sdmmc_driver);
}
void bcmsdh_unregister_client_driver(void)
{
sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+#ifdef GLOBAL_SDMMC_INSTANCE
if (gInstance)
kfree(gInstance);
+#endif
}
* Driver O/S-independent utility routines
*
* $Copyright Open Broadcom Corporation$
- * $Id: bcmutils.c 488316 2014-06-30 15:22:21Z $
+ * $Id: bcmutils.c 496061 2014-08-11 06:14:48Z $
*/
#include <bcm_cfg.h>
return NULL;
}
+void
+id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
+{
+ uint16 idx, val16;
+ id16_map_t * id16_map;
+
+ ASSERT(total_ids > 0);
+ ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+ if (id16_map == NULL) {
+ return;
+ }
+
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
+
+ /* Populate stack with 16bit id values, commencing with start_val16 */
+ id16_map->stack_idx = 0;
+ val16 = start_val16;
+
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+}
+
uint16 BCMFASTPATH /* Allocate a unique 16bit id */
id16_map_alloc(void * id16_map_hndl)
{
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd.h 491170 2014-07-15 06:23:58Z $
+ * $Id: dhd.h 504503 2014-09-24 11:28:56Z $
*/
/****************
struct dhd_prot;
struct dhd_info;
struct dhd_ioctl;
+struct pktq;
/* The level of bus communication with the dongle */
enum dhd_bus_state {
DHD_BUS_SUSPEND, /* Bus has been suspended */
};
-#if defined(NDISVER) && (NDISVER >= 0x0600)
+#if defined(NDISVER)
+#if (NDISVER >= 0x0600)
/* Firmware requested operation mode */
#define STA_MASK 0x0001
#define HOSTAPD_MASK 0x0002
#define P2P_GC_ENABLED 0x0020
#define CONCURENT_MASK 0x00F0
#endif /* (NDISVER >= 0x0600) */
+#endif /* #if defined(NDISVER) */
+
+#define DHD_IF_ROLE_STA(role) (role == WLC_E_IF_ROLE_STA ||\
+ role == WLC_E_IF_ROLE_P2P_CLIENT)
/* For supporting multiple interfaces */
#define DHD_MAX_IFS 16
#if defined(STATIC_WL_PRIV_STRUCT)
DHD_PREALLOC_WIPHY_ESCAN0 = 5,
#endif /* STATIC_WL_PRIV_STRUCT */
- DHD_PREALLOC_DHD_INFO = 7
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
+ DHD_PREALLOC_FLOWRING = 10
};
/* Packet alignment for most efficient SDIO (can change based on platform) */
* 2. TCPACKs that don't need to hurry delivered remains longer in TXQ so can be suppressed.
*/
TCPACK_SUP_DELAYTX,
+ TCPACK_SUP_HOLD,
TCPACK_SUP_LAST_MODE
};
#endif /* DHDTCPACK_SUPPRESS */
#ifdef DHDTCPACK_SUPPRESS
uint8 tcpack_sup_mode; /* TCPACK suppress mode */
void *tcpack_sup_module; /* TCPACK suppress module */
+ uint32 tcpack_sup_ratio;
+ uint32 tcpack_sup_delay;
#endif /* DHDTCPACK_SUPPRESS */
#if defined(ARP_OFFLOAD_SUPPORT)
uint32 arp_version;
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */
#endif
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#ifdef PKT_FILTER_SUPPORT
+ uint pkt_filter_mode;
+ uint pkt_filter_ports_count;
+ uint16 pkt_filter_ports[WL_PKT_FILTER_PORTS_MAX];
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
#ifdef CUSTOM_SET_CPUCORE
struct task_struct * current_dpc;
struct task_struct * current_rxf;
int chan_isvht80;
#endif /* CUSTOM_SET_CPUCORE */
-
void *sta_pool; /* pre-allocated pool of sta objects */
void *staid_allocator; /* allocator of sta indexes */
void *flowid_allocator; /* unique flowid allocator */
void *flow_ring_table; /* flow ring table, include prot and bus info */
void *if_flow_lkup; /* per interface flowid lkup hash table */
+ void *flowid_lock; /* per os lock for flowid info protection */
uint32 num_flow_rings;
+
+ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */
+
uint8 flow_prio_map[NUMPRIO];
uint8 flow_prio_map_type;
char enable_log[MAX_EVENT];
extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+extern int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
+#ifdef BCMPCIE_OOB_HOST_WAKE
+#define OOB_WAKE_LOCK_TIMEOUT 500
+#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
+#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
+#endif /* BCMPCIE_OOB_HOST_WAKE */
#define DHD_PACKET_TIMEOUT_MS 500
#define DHD_EVENT_TIMEOUT_MS 1500
void dhd_net_if_unlock(struct net_device *dev);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
extern struct mutex _dhd_sdio_mutex_lock_;
#endif
#endif /* MULTIPLE_SUPPLICANT */
/* Indication from bus module regarding removal/absence of dongle */
extern void dhd_detach(dhd_pub_t *dhdp);
extern void dhd_free(dhd_pub_t *dhdp);
+extern void dhd_clear(dhd_pub_t *dhdp);
/* Indication from bus module to change flow-control state */
extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
-#if 0 && (NDISVER >= 0x0600)
-#define dhd_os_open_image(a) wl_os_open_image(a)
-#define dhd_os_close_image(a) wl_os_close_image(a)
-#define dhd_os_get_image_block(a, b, c) wl_os_get_image_block(a, b, c)
-#endif /* (NDISVER >= 0x0600) */
extern int dhd_os_get_image_block(char * buf, int len, void * image);
extern void * dhd_os_open_image(char * filename);
extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
extern bool dhd_os_check_if_up(dhd_pub_t *pub);
extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock_all(dhd_pub_t *pub);
extern int dhd_get_instance(dhd_pub_t *pub);
#ifdef CUSTOM_SET_CPUCORE
extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
#endif /* KEEP_ALIVE */
+#ifdef SUPPORT_AP_POWERSAVE
+extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
+#endif
+
#ifdef PKT_FILTER_SUPPORT
#define DHD_UNICAST_FILTER_NUM 0
#define DHD_MULTICAST6_FILTER_NUM 3
#define DHD_MDNS_FILTER_NUM 4
#define DHD_ARP_FILTER_NUM 5
-extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+/* Port based packet filtering command actions */
+#define PKT_FILTER_PORTS_CLEAR 0
+#define PKT_FILTER_PORTS_ADD 1
+#define PKT_FILTER_PORTS_DEL 2
+#define PKT_FILTER_PORTS_LOOPBACK 3
+#define PKT_FILTER_PORTS_MAX PKT_FILTER_PORTS_LOOPBACK
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
+extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
extern int net_os_enable_packet_filter(struct net_device *dev, int val);
extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+extern void dhd_set_packet_filter_mode(struct net_device *dev, char *command);
+extern int dhd_set_packet_filter_ports(struct net_device *dev, char *command);
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
#endif /* PKT_FILTER_SUPPORT */
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
extern uint wl_dbg_level;
#endif
+#ifdef CUSTOMER_HW
+struct wifi_platform_data {
+ int (*set_power)(bool val);
+ int (*set_carddetect)(bool val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+#endif
+
extern uint dhd_slpauto;
/* Use interrupts */
#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY
#endif /* WIFI_TURNON_DELAY */
+#ifdef BCMSDIO
#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */
+#else
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 0 /* msec */
+#endif
#ifndef CUSTOM_DHD_WATCHDOG_MS
#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS
#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
#endif
#endif /* WLTDLS */
+#define DEFAULT_BCN_TIMEOUT 8
+#ifndef CUSTOM_BCN_TIMEOUT
+#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT
+#endif
#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */
#ifndef MAX_DTIM_ALLOWED_INTERVAL
#define DHD_GENERAL_UNLOCK(dhdp, flags) \
dhd_os_general_spin_unlock((dhdp), (flags))
-/* Enable DHD flowring queue spin lock/unlock */
-#define DHD_QUEUE_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_QUEUE_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+/* Enable DHD flowring spin lock/unlock */
+#define DHD_FLOWRING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWRING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring info spin lock/unlock */
+#define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
} wl_io_pport_t;
extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub);
-#ifdef EXYNOS5433_PCIE_WAR
-extern void exynos_pcie_set_l1_exit(void);
-extern void exynos_pcie_clear_l1_exit(void);
-extern int enum_wifi;
-#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef CONFIG_MACH_UNIVERSAL5433
+extern int check_rev(void);
+#endif
#endif /* _dhd_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_bus.h 491657 2014-07-17 06:29:40Z $
+ * $Id: dhd_bus.h 497466 2014-08-19 15:41:01Z $
*/
#ifndef _dhd_bus_h_
extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
void * data, uint8 flowid);
extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
-extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, uint16 flowid);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node);
extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+extern int dhd_bus_request_irq(struct dhd_bus *bus);
#endif /* BCMPCIE */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_cdc.c 472193 2014-04-23 06:27:38Z $
+ * $Id: dhd_cdc.c 492377 2014-07-21 19:54:06Z $
*
* BDC is like CDC, except it includes a header for data packets to convey
* packet priority over the bus, and flags (e.g. to indicate checksum status
PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
#endif /* BDC */
-#if defined(NDISVER) && (NDISVER < 0x0630)
+#if defined(NDISVER)
+#if (NDISVER < 0x0630)
if (PKTLEN(dhd->osh, pktbuf) < (uint32) (data_offset << 2)) {
DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
PKTLEN(dhd->osh, pktbuf), (data_offset * 4)));
return BCME_ERROR;
}
+#endif /* #if defined(NDISVER) */
#endif /* (NDISVER < 0x0630) */
#ifdef PROP_TXSTATUS
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_cfg_vendor.c 487126 2014-06-24 23:06:12Z $
+ * $Id: dhd_cfg_vendor.c 495605 2014-08-07 18:41:34Z $
*/
+#include <linux/vmalloc.h>
#include <linuxver.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
#include <wl_cfgvendor.h>
#include <dngl_stats.h>
#include <dhd.h>
+#include <dhd_dbg.h>
#include <dhdioctl.h>
#include <brcm_nl80211.h>
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_common.c 490628 2014-07-11 07:13:31Z $
+ * $Id: dhd_common.c 492215 2014-07-20 16:44:15Z $
*/
#include <typedefs.h>
#include <osl.h>
if (type != WLC_E_LINK) {
uint8 ifindex = (uint8)hostidx;
uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
- if (role == WLC_E_IF_ROLE_STA) {
+ if (DHD_IF_ROLE_STA(role)) {
dhd_flow_rings_delete(dhd_pub, ifindex);
} else {
dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
} \
} while (0)\r
\r
-#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */\r
#define MAXSZ_BUF 1000\r
#define MAXSZ_CONFIG 4096\r
\r
+#define FW_TYPE_STA 0\r
+#define FW_TYPE_APSTA 1\r
+#define FW_TYPE_P2P 2\r
+#define FW_TYPE_MFG 3\r
+#define FW_TYPE_G 0\r
+#define FW_TYPE_AG 1\r
+\r
+#ifdef BCMSDIO\r
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */\r
+\r
#define BCM43362A0_CHIP_REV 0\r
#define BCM43362A2_CHIP_REV 1\r
#define BCM43430A0_CHIP_REV 0\r
+#define BCM43430A1_CHIP_REV 1\r
#define BCM4330B2_CHIP_REV 4\r
#define BCM43340B0_CHIP_REV 2\r
#define BCM43341B0_CHIP_REV 2\r
#define BCM4354A1_CHIP_REV 1\r
#define BCM4356A2_CHIP_REV 2\r
\r
-#define FW_TYPE_STA 0\r
-#define FW_TYPE_APSTA 1\r
-#define FW_TYPE_P2P 2\r
-#define FW_TYPE_MFG 3\r
-#define FW_TYPE_G 0\r
-#define FW_TYPE_AG 1\r
-\r
const static char *bcm4330b2_fw_name[] = {\r
"fw_RK903b2.bin",\r
"fw_RK903b2_apsta.bin",\r
"fw_bcm43438a0_mfg.bin"\r
};\r
\r
+const static char *bcm43438a1_fw_name[] = {\r
+ "fw_bcm43438a1.bin",\r
+ "fw_bcm43438a1_apsta.bin",\r
+ "fw_bcm43438a1_p2p.bin",\r
+ "fw_bcm43438a1_mfg.bin"\r
+};\r
+\r
const static char *bcm43341b0_ag_fw_name[] = {\r
"fw_bcm43341b0_ag.bin",\r
"fw_bcm43341b0_ag_apsta.bin",\r
"fw_bcm4356a2_ag_p2p.bin",\r
"fw_bcm4356a2_ag_mfg.bin"\r
};\r
+#endif\r
+#ifdef BCMPCIE\r
+#define BCM4356A2_CHIP_REV 2\r
+\r
+const static char *bcm4356a2_pcie_ag_fw_name[] = {\r
+ "fw_bcm4356a2_pcie_ag.bin",\r
+ "fw_bcm4356a2_pcie_ag_apsta.bin",\r
+ "fw_bcm4356a2_pcie_ag_p2p.bin",\r
+ "fw_bcm4356a2_pcie_ag_mfg.bin"\r
+};\r
+#endif\r
\r
#define htod32(i) i
#define htod16(i) i
#define htodchanspec(i) i
#define dtohchanspec(i) i\r
\r
+#ifdef BCMSDIO\r
void\r
dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)\r
{\r
mac_list->count = 0;\r
}\r
\r
+#if defined(HW_OOB)\r
+void\r
+dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)\r
+{\r
+ uint32 gpiocontrol, addr;\r
+\r
+ if (CHIPID(chip) == BCM43362_CHIP_ID) {\r
+ printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);\r
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol);\r
+ gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);\r
+ gpiocontrol |= 0x2;\r
+ bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);\r
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);\r
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);\r
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);\r
+ }\r
+}\r
+#endif\r
+\r
int\r
dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac)\r
{\r
}\r
}\r
}\r
+#endif\r
\r
void\r
dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)\r
FW_TYPE_P2P : FW_TYPE_STA)));\r
\r
switch (chip) {\r
+#ifdef BCMSDIO\r
case BCM4330_CHIP_ID:\r
if (ag_type == FW_TYPE_G) {\r
if (chiprev == BCM4330B2_CHIP_REV)\r
case BCM43430_CHIP_ID:\r
if (chiprev == BCM43430A0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43438a0_fw_name[fw_type]);\r
+ else if (chiprev == BCM43430A1_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43438a1_fw_name[fw_type]);\r
break;\r
case BCM43340_CHIP_ID:\r
if (chiprev == BCM43340B0_CHIP_REV)\r
else if (chiprev == BCM4356A2_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]);\r
break;\r
+#endif\r
+#ifdef BCMPCIE\r
+ case BCM4356_CHIP_ID:\r
+ if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4356a2_pcie_ag_fw_name[fw_type]);\r
+ break;\r
+#endif\r
}\r
\r
printf("%s: firmware_path=%s\n", __FUNCTION__, fw_path);\r
}\r
\r
-#if defined(HW_OOB)\r
void\r
-dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)\r
+dhd_conf_set_conf_path_by_nv_path(dhd_pub_t *dhd, char *conf_path, char *nv_path)\r
{\r
- uint32 gpiocontrol, addr;\r
+ int i;\r
\r
- if (CHIPID(chip) == BCM43362_CHIP_ID) {\r
- printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);\r
- addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol);\r
- gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);\r
- gpiocontrol |= 0x2;\r
- bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);\r
- }\r
-}\r
+ if (nv_path[0] == '\0') {\r
+#ifdef CONFIG_BCMDHD_NVRAM_PATH\r
+ bcm_strncpy_s(conf_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);\r
+ if (nv_path[0] == '\0')\r
#endif\r
+ {\r
+ printf("nvram path is null\n");\r
+ return;\r
+ }\r
+ } else\r
+ strcpy(conf_path, nv_path);\r
\r
-void\r
-dhd_conf_set_fw_path(dhd_pub_t *dhd, char *fw_path)\r
-{\r
- if (dhd->conf->fw_path[0]) {\r
- strcpy(fw_path, dhd->conf->fw_path);\r
- printf("%s: fw_path is changed to %s\n", __FUNCTION__, fw_path);\r
+ /* find out the last '/' */\r
+ i = strlen(conf_path);\r
+ while (i>0){\r
+ if (conf_path[i] == '/') break;\r
+ i--;\r
}\r
-}\r
+ strcpy(&conf_path[i+1], "config.txt");\r
\r
-void\r
-dhd_conf_set_nv_path(dhd_pub_t *dhd, char *nv_path)\r
-{\r
- if (dhd->conf->nv_path[0]) {\r
- strcpy(nv_path, dhd->conf->nv_path);\r
- printf("%s: nv_path is changed to %s\n", __FUNCTION__, nv_path);\r
- }\r
+ printf("%s: config_path=%s\n", __FUNCTION__, conf_path);\r
}\r
\r
int\r
int bcmerror = -1;\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
\r
- if (dhd->conf->chip == BCM43362_CHIP_ID && dhd->conf->force_wme_ac) {\r
+ if (dhd->conf->force_wme_ac) {\r
bcm_mkiovar("force_wme_ac", (char *)&dhd->conf->force_wme_ac, 4, iovbuf, sizeof(iovbuf));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: force_wme_ac setting failed %d\n", __FUNCTION__, bcmerror));\r
CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- sizeof(acp)));\r
+ (int)sizeof(acp)));\r
acparam = &acp[AC_BE];\r
CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- sizeof(acp)));\r
+ (int)sizeof(acp)));\r
acparam = &acp[AC_VI];\r
CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- sizeof(acp)));\r
+ (int)sizeof(acp)));\r
acparam = &acp[AC_VO];\r
CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- sizeof(acp)));\r
+ (int)sizeof(acp)));\r
\r
return;\r
}\r
CONFIG_TRACE(("%s: mod aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acp->ACI, acp->ACI&EDCF_AIFSN_MASK,\r
acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- sizeof(edcf_acparam_t)));\r
+ (int)sizeof(edcf_acparam_t)));\r
\r
/*\r
* Now use buf as an output buffer.\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
uint stbc = 0;\r
\r
- if (dhd->conf->chip == BCM4324_CHIP_ID && dhd->conf->stbc >= 0) {\r
+ if (dhd->conf->stbc >= 0) {\r
stbc = (uint)dhd->conf->stbc;\r
printf("%s: set stbc_tx %d\n", __FUNCTION__, stbc);\r
bcm_mkiovar("stbc_tx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
int i;\r
\r
/*\r
- All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000
+ All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000\r
Netbios pkt: 120 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089\r
*/\r
for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {\r
{\r
int i;\r
\r
- for(i=0; i<dhd->conf->pkt_filter_del.count; i++) {\r
- if (id == dhd->conf->pkt_filter_del.id[i]) {\r
- printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);\r
- return true;\r
+ if (dhd && dhd->conf) {\r
+ for(i=0; i<dhd->conf->pkt_filter_del.count; i++) {\r
+ if (id == dhd->conf->pkt_filter_del.id[i]) {\r
+ printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);\r
+ return true;\r
+ }\r
}\r
+ return false;\r
}\r
return false;\r
}\r
}\r
\r
void\r
-dhd_conf_set_glom(dhd_pub_t *dhd)\r
+dhd_conf_set_bus_txglom(dhd_pub_t *dhd)\r
{\r
int bcmerror = -1;\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
uint32 ampdu_ba_wsize = dhd->conf->ampdu_ba_wsize;\r
\r
- /* Set ampdu ba wsize */\r
+ /* Set ampdu_ba_wsize */\r
if (ampdu_ba_wsize > 0) {\r
printf("%s: set ampdu_ba_wsize %d\n", __FUNCTION__, ampdu_ba_wsize);\r
bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
sizeof(iovbuf), TRUE, 0)) < 0) {\r
- DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",\r
+ DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",\r
__FUNCTION__, ampdu_ba_wsize, bcmerror));\r
}\r
}\r
}\r
}\r
\r
+void\r
+dhd_conf_set_txbf(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+ int txbf = dhd->conf->txbf;\r
+\r
+ /* Set txbf */\r
+ if (txbf >= 0) {\r
+ printf("%s: set txbf %d\n", __FUNCTION__, txbf);\r
+ bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
+ sizeof(iovbuf), TRUE, 0)) < 0) {\r
+ DHD_ERROR(("%s Set txbf to %d failed %d\n",\r
+ __FUNCTION__, txbf, bcmerror));\r
+ }\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_frameburst(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ int frameburst = dhd->conf->frameburst;\r
+\r
+	/* Set frameburst */\r
+ if (frameburst >= 0) {\r
+ printf("%s: set frameburst %d\n", __FUNCTION__, frameburst);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, &frameburst , sizeof(frameburst), true, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_SET_FAKEFRAG setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_lpc(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+ int lpc = dhd->conf->lpc;\r
+\r
+ /* Set lpc */\r
+ if (lpc >= 0) {\r
+ printf("%s: set lpc %d\n", __FUNCTION__, lpc);\r
+ bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
+ sizeof(iovbuf), TRUE, 0)) < 0) {\r
+ DHD_ERROR(("%s Set lpc to %d failed %d\n",\r
+ __FUNCTION__, lpc, bcmerror));\r
+ }\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_disable_proptx(dhd_pub_t *dhd)\r
+{ \r
+ printf("%s: set disable_proptx %d\n", __FUNCTION__, dhd->conf->disable_proptx);\r
+ disable_proptx = dhd->conf->disable_proptx;\r
+}\r
+\r
unsigned int\r
process_config_vars(char *varbuf, unsigned int len, char *pickbuf, char *param)\r
{\r
dhd_msg_level = (int)simple_strtol(pick, NULL, 0);\r
printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level);\r
}\r
+#ifdef BCMSDIO\r
/* Process sd_msglevel */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "sd_msglevel=");\r
sd_msglevel = (int)simple_strtol(pick, NULL, 0);\r
printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel);\r
}\r
+#endif\r
/* Process android_msg_level */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "android_msg_level=");\r
dhd_console_ms = (int)simple_strtol(pick, NULL, 0);\r
printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms);\r
}\r
-}\r
-\r
+}\r
+\r
void\r
dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *bufp, uint len)\r
{\r
* [nv_by_mac]: The same format as fw_by_mac\r
*\r
*/\r
-\r
int\r
-dhd_conf_read_config(dhd_pub_t *dhd)\r
+dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)\r
{\r
int bcmerror = -1, i, j;\r
uint len, len_val;\r
void * image = NULL;\r
char * memblock = NULL;\r
char *bufp, pick[MAXSZ_BUF], *pch, *pick_tmp;\r
- char *pconf_path;\r
bool conf_file_exists;\r
wl_mac_list_t *mac_list;\r
wl_mac_range_t *mac_range;\r
struct dhd_conf *conf = dhd->conf;\r
\r
- pconf_path = dhd->conf_path;\r
-\r
- conf_file_exists = ((pconf_path != NULL) && (pconf_path[0] != '\0'));\r
- if (!conf_file_exists)\r
+ conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));\r
+ if (!conf_file_exists) {\r
+ printk("%s: config path %s\n", __FUNCTION__, conf_path);\r
return (0);\r
+ }\r
\r
if (conf_file_exists) {\r
- image = dhd_os_open_image(pconf_path);\r
+ image = dhd_os_open_image(conf_path);\r
if (image == NULL) {\r
- printk("%s: Ignore config file %s\n", __FUNCTION__, pconf_path);\r
+ printk("%s: Ignore config file %s\n", __FUNCTION__, conf_path);\r
goto err;\r
}\r
}\r
/* Process log_level */\r
dhd_conf_read_log_level(dhd, bufp, len);\r
dhd_conf_read_roam_params(dhd, bufp, len);\r
- dhd_conf_read_wme_ac_params(dhd, bufp, len);\r
+	dhd_conf_read_wme_ac_params(dhd, bufp, len);\r
\r
/* Process fw_by_mac */\r
memset(pick, 0, MAXSZ_BUF);\r
}\r
}\r
\r
- /* Process firmware path */\r
- memset(pick, 0, MAXSZ_BUF);\r
- len_val = process_config_vars(bufp, len, pick, "fw_path=");\r
- if (len_val) {\r
- memcpy(conf->fw_path, pick, len_val);\r
- printf("%s: fw_path = %s\n", __FUNCTION__, conf->fw_path);\r
- }\r
-\r
- /* Process nvram path */\r
- memset(pick, 0, MAXSZ_BUF);\r
- len_val = process_config_vars(bufp, len, pick, "nv_path=");\r
- if (len_val) {\r
- memcpy(conf->nv_path, pick, len_val);\r
- printf("%s: nv_path = %s\n", __FUNCTION__, conf->nv_path);\r
- }\r
-\r
/* Process band */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "band=");\r
printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable);\r
}\r
\r
+#ifdef BCMSDIO\r
/* Process dhd_doflow parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "dhd_doflow=");\r
dhd_doflow = TRUE;\r
printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);\r
}\r
+#endif\r
\r
/* Process dhd_master_mode parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);\r
}\r
\r
- /* Process bus_txglom */\r
+ /* Process bus:txglom */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "bus:txglom=");\r
if (len_val) {\r
conf->bus_txglom = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: bus:txglom = %d\n", __FUNCTION__, conf->bus_txglom);\r
+		printf("%s: bus:txglom = %d\n", __FUNCTION__, conf->bus_txglom);\r
}\r
\r
/* Process ampdu_ba_wsize parameters */\r
conf->spect = (int)simple_strtol(pick, NULL, 10);\r
printf("%s: spect = %d\n", __FUNCTION__, conf->spect);\r
}\r
- \r
+\r
+ /* Process txbf parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "txbf=");\r
+ if (len_val) {\r
+ conf->txbf = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf);\r
+ }\r
+\r
+ /* Process frameburst parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "frameburst=");\r
+ if (len_val) {\r
+ conf->frameburst = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst);\r
+ }\r
+\r
+ /* Process lpc parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "lpc=");\r
+ if (len_val) {\r
+ conf->lpc = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: lpc = %d\n", __FUNCTION__, conf->lpc);\r
+ }\r
+\r
+ /* Process use_rxchain parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "use_rxchain=");\r
+ if (len_val) {\r
+ conf->use_rxchain = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain);\r
+ }\r
+\r
+ /* Process txglomsize parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "txglomsize=");\r
+ if (len_val) {\r
+ conf->txglomsize = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize);\r
+ }\r
+\r
+ /* Process disable_proptx parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "disable_proptx=");\r
+ if (len_val) {\r
+ dhd->conf->disable_proptx = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: disable_proptx = %d\n", __FUNCTION__, dhd->conf->disable_proptx);\r
+ }\r
+\r
+ /* Process dpc_cpucore parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dpc_cpucore=");\r
+ if (len_val) {\r
+ conf->dpc_cpucore = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore);\r
+ }\r
+\r
+ /* Process bus:rxglom parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "bus:rxglom=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ conf->bus_rxglom = FALSE;\r
+ else\r
+ conf->bus_rxglom = TRUE;\r
+ printf("%s: bus_rxglom = %d\n", __FUNCTION__, conf->bus_rxglom);\r
+ }\r
+\r
+ /* Process deepsleep parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "deepsleep=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "1", len_val))\r
+ conf->deepsleep = TRUE;\r
+ else\r
+ conf->deepsleep = FALSE;\r
+ printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep);\r
+ }\r
+\r
bcmerror = 0;\r
} else {\r
CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));\r
int\r
dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)\r
{\r
+ printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev);\r
dhd->conf->chip = chip;\r
dhd->conf->chiprev = chiprev;\r
return 0;\r
\r
CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
\r
+#ifdef BCMSDIO\r
dhd_conf_free_mac_list(&conf->fw_by_mac);\r
dhd_conf_free_mac_list(&conf->nv_by_mac);\r
+#endif\r
conf->band = WLC_BAND_AUTO;\r
conf->mimo_bw_cap = -1;\r
if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
conf->srl = -1;\r
conf->lrl = -1;\r
conf->bcn_timeout = 15;\r
- if (conf->chip == BCM4339_CHIP_ID) {\r
- conf->bus_txglom = 8;\r
- conf->ampdu_ba_wsize = 40;\r
- }\r
conf->kso_enable = TRUE;\r
conf->spect = -1;\r
+ conf->txbf = -1;\r
+ conf->lpc = -1;\r
+ conf->disable_proptx = 0;\r
+ conf->bus_txglom = 0;\r
+ conf->use_rxchain = 1;\r
+ conf->bus_rxglom = TRUE;\r
+ conf->txglomsize = -1;\r
+ conf->ampdu_ba_wsize = 0;\r
+ conf->dpc_cpucore = 0;\r
+ conf->frameburst = -1;\r
+ conf->deepsleep = FALSE;\r
+ if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) {\r
+ conf->disable_proptx = 1;\r
+ conf->use_rxchain = 0;\r
+ }\r
+	if (conf->chip == BCM43430_CHIP_ID) {\r
+		conf->use_rxchain = 0;\r
+		conf->bus_rxglom = FALSE;\r
+	}\r
+ if (conf->chip == BCM4339_CHIP_ID) {\r
+ conf->txbf = 1;\r
+ }\r
+ if (conf->chip == BCM4354_CHIP_ID) {\r
+ conf->txbf = 1;\r
+ }\r
+ if (conf->chip == BCM4356_CHIP_ID) {\r
+ conf->txbf = 1;\r
+ }\r
\r
return 0;\r
}\r
int\r
dhd_conf_reset(dhd_pub_t *dhd)\r
{\r
+#ifdef BCMSDIO\r
dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);\r
dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);\r
+#endif\r
memset(dhd->conf, 0, sizeof(dhd_conf_t));\r
return 0;\r
}\r
CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
\r
if (dhd->conf) {\r
+#ifdef BCMSDIO\r
dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);\r
dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);\r
+#endif\r
MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t));\r
}\r
dhd->conf = NULL;\r
#define FW_PATH_AUTO_SELECT 1\r
extern char firmware_path[MOD_PARAM_PATHLEN];\r
extern int disable_proptx;\r
+#ifdef BCMSDIO\r
extern uint dhd_doflow;\r
+#endif\r
\r
/* mac range */\r
typedef struct wl_mac_range {\r
uint chiprev; /* chip revision */\r
wl_mac_list_ctrl_t fw_by_mac; /* Firmware auto selection by MAC */\r
wl_mac_list_ctrl_t nv_by_mac; /* NVRAM auto selection by MAC */\r
- char fw_path[MOD_PARAM_PATHLEN]; /* Firmware path */\r
- char nv_path[MOD_PARAM_PATHLEN]; /* NVRAM path */\r
uint band; /* Band, b:2.4G only, otherwise for auto */\r
int mimo_bw_cap; /* Bandwidth, 0:HT20ALL, 1: HT40ALL, 2:HT20IN2G_HT40PIN5G */\r
wl_country_t cspec; /* Country */\r
int srl; /* short retry limit */\r
int lrl; /* long retry limit */\r
uint bcn_timeout; /* beacon timeout */\r
- uint32 bus_txglom; /* bus:txglom */\r
- uint32 ampdu_ba_wsize;\r
bool kso_enable;\r
int spect;\r
+ int txbf;\r
+ int lpc;\r
+ int disable_proptx;\r
+ uint32 bus_txglom; /* bus:txglom */\r
+ int use_rxchain;\r
+ bool bus_rxglom; /* bus:rxglom */\r
+ int txglomsize;\r
+ uint32 ampdu_ba_wsize;\r
+ int dpc_cpucore;\r
+ int frameburst;\r
+ bool deepsleep;\r
} dhd_conf_t;\r
\r
+#ifdef BCMSDIO\r
+int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac);\r
void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path);\r
void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path);\r
-void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);\r
#if defined(HW_OOB)\r
void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip);\r
#endif\r
-void dhd_conf_set_fw_path(dhd_pub_t *dhd, char *fw_path);\r
-void dhd_conf_set_nv_path(dhd_pub_t *dhd, char *nv_path);\r
+#endif\r
+void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);\r
+void dhd_conf_set_conf_path_by_nv_path(dhd_pub_t *dhd, char *conf_path, char *nv_path);\r
int dhd_conf_set_band(dhd_pub_t *dhd);\r
uint dhd_conf_get_band(dhd_pub_t *dhd);\r
int dhd_conf_set_country(dhd_pub_t *dhd);\r
void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd);\r
void dhd_conf_set_srl(dhd_pub_t *dhd);\r
void dhd_conf_set_lrl(dhd_pub_t *dhd);\r
-void dhd_conf_set_glom(dhd_pub_t *dhd);\r
+void dhd_conf_set_bus_txglom(dhd_pub_t *dhd);\r
void dhd_conf_set_ampdu_ba_wsize(dhd_pub_t *dhd);\r
void dhd_conf_set_spect(dhd_pub_t *dhd);\r
-int dhd_conf_read_config(dhd_pub_t *dhd);\r
+void dhd_conf_set_txbf(dhd_pub_t *dhd);\r
+void dhd_conf_set_frameburst(dhd_pub_t *dhd);\r
+void dhd_conf_set_lpc(dhd_pub_t *dhd);\r
+void dhd_conf_set_disable_proptx(dhd_pub_t *dhd);\r
+int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path);\r
int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev);\r
uint dhd_conf_get_chip(void *context);\r
uint dhd_conf_get_chiprev(void *context);\r
* Customer code to add GPIO control during WLAN start/stop
* $Copyright Open Broadcom Corporation$
*
-* $Id: dhd_custom_gpio.c 447105 2014-01-08 05:27:09Z $
+* $Id: dhd_custom_gpio.c 493822 2014-07-29 13:20:26Z $
*/
#include <typedefs.h>
}
#ifdef GET_CUSTOM_MAC_ENABLE
-#if defined(CUSTOMER_HW)
-int rockchip_wifi_mac_addr(unsigned char *buf);
-#endif
/* Function to get custom MAC address */
int
dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
ret = wifi_platform_get_mac_addr(adapter, buf);
#endif
-#if defined(CUSTOMER_HW)
- ret = rockchip_wifi_mac_addr(buf);
-#endif
-
#ifdef EXAMPLE_GET_MAC
/* EXAMPLE code */
{
{"TR", "TR", 0},
{"NO", "NO", 0},
#endif /* EXMAPLE_TABLE */
-#if defined(CUSTOMER_HW2)
+#if defined(CUSTOMER_HW2) && !defined(CUSTOMER_HW5)
#if defined(BCM4335_CHIP)
{"", "XZ", 11}, /* Universal if Country code is unknown or empty */
#endif
{"RU", "RU", 1},
{"US", "US", 5}
#endif
-#endif /* CUSTOMER_HW2 */
+
+#elif defined(CUSTOMER_HW5)
+ {"", "XZ", 11},
+ {"AE", "AE", 212},
+ {"AG", "AG", 2},
+ {"AI", "AI", 2},
+ {"AL", "AL", 2},
+ {"AN", "AN", 3},
+ {"AR", "AR", 212},
+ {"AS", "AS", 15},
+ {"AT", "AT", 4},
+ {"AU", "AU", 212},
+ {"AW", "AW", 2},
+ {"AZ", "AZ", 2},
+ {"BA", "BA", 2},
+ {"BD", "BD", 2},
+ {"BE", "BE", 4},
+ {"BG", "BG", 4},
+ {"BH", "BH", 4},
+ {"BM", "BM", 15},
+ {"BN", "BN", 4},
+ {"BR", "BR", 212},
+ {"BS", "BS", 2},
+ {"BY", "BY", 3},
+ {"BW", "BW", 1},
+ {"CA", "CA", 212},
+ {"CH", "CH", 212},
+ {"CL", "CL", 212},
+ {"CN", "CN", 212},
+ {"CO", "CO", 212},
+ {"CR", "CR", 21},
+ {"CY", "CY", 212},
+ {"CZ", "CZ", 212},
+ {"DE", "DE", 212},
+ {"DK", "DK", 4},
+ {"DZ", "DZ", 1},
+ {"EC", "EC", 23},
+ {"EE", "EE", 4},
+ {"EG", "EG", 212},
+ {"ES", "ES", 212},
+ {"ET", "ET", 2},
+ {"FI", "FI", 4},
+ {"FR", "FR", 212},
+ {"GB", "GB", 212},
+ {"GD", "GD", 2},
+ {"GF", "GF", 2},
+ {"GP", "GP", 2},
+ {"GR", "GR", 212},
+ {"GT", "GT", 0},
+ {"GU", "GU", 17},
+ {"HK", "HK", 212},
+ {"HR", "HR", 4},
+ {"HU", "HU", 4},
+ {"IN", "IN", 212},
+ {"ID", "ID", 212},
+ {"IE", "IE", 5},
+ {"IL", "IL", 7},
+ {"IN", "IN", 212},
+ {"IS", "IS", 4},
+ {"IT", "IT", 212},
+ {"JO", "JO", 3},
+ {"JP", "JP", 212},
+ {"KH", "KH", 4},
+ {"KI", "KI", 1},
+ {"KR", "KR", 212},
+ {"KW", "KW", 5},
+ {"KY", "KY", 4},
+ {"KZ", "KZ", 212},
+ {"LA", "LA", 4},
+ {"LB", "LB", 6},
+ {"LI", "LI", 4},
+ {"LK", "LK", 3},
+ {"LS", "LS", 2},
+ {"LT", "LT", 4},
+ {"LR", "LR", 2},
+ {"LU", "LU", 3},
+ {"LV", "LV", 4},
+ {"MA", "MA", 2},
+ {"MC", "MC", 1},
+ {"MD", "MD", 2},
+ {"ME", "ME", 2},
+ {"MK", "MK", 2},
+ {"MN", "MN", 0},
+ {"MO", "MO", 2},
+ {"MR", "MR", 2},
+ {"MT", "MT", 4},
+ {"MQ", "MQ", 2},
+ {"MU", "MU", 2},
+ {"MV", "MV", 3},
+ {"MX", "MX", 212},
+ {"MY", "MY", 212},
+ {"NI", "NI", 0},
+ {"NL", "NL", 212},
+ {"NO", "NO", 4},
+ {"NP", "NP", 3},
+ {"NZ", "NZ", 9},
+ {"OM", "OM", 4},
+ {"PA", "PA", 17},
+ {"PE", "PE", 212},
+ {"PG", "PG", 2},
+ {"PH", "PH", 212},
+ {"PL", "PL", 212},
+ {"PR", "PR", 25},
+ {"PT", "PT", 212},
+ {"PY", "PY", 4},
+ {"RE", "RE", 2},
+ {"RO", "RO", 212},
+ {"RS", "RS", 2},
+ {"RU", "RU", 212},
+ {"SA", "SA", 212},
+ {"SE", "SE", 212},
+ {"SG", "SG", 212},
+ {"SI", "SI", 4},
+ {"SK", "SK", 212},
+ {"SN", "SN", 2},
+ {"SV", "SV", 25},
+ {"TH", "TH", 212},
+ {"TR", "TR", 212},
+ {"TT", "TT", 5},
+ {"TW", "TW", 212},
+ {"UA", "UA", 212},
+ {"UG", "UG", 2},
+ {"US", "US", 212},
+ {"UY", "UY", 5},
+ {"VA", "VA", 2},
+ {"VE", "VE", 3},
+ {"VG", "VG", 2},
+ {"VI", "VI", 18},
+ {"VN", "VN", 4},
+ {"YT", "YT", 2},
+ {"ZA", "ZA", 212},
+ {"ZM", "ZM", 2},
+ {"XT", "XT", 212},
+ {"XZ", "XZ", 11},
+ {"XV", "XV", 17},
+ {"Q1", "Q1", 77},
+#endif /* CUSTOMER_HW2 && !CUSTOMER_HW5, or CUSTOMER_HW5 */
};
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_flowrings.c jaganlv $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <proto/bcmevent.h>
+#include <dngl_stats.h>
+
+#include <dhd.h>
+
+#include <dhd_flowring.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <proto/802.1d.h>
+#include <pcie_core.h>
+#include <bcmmsgbuf.h>
+#include <dhd_pcie.h>
+
+static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da);
+
+static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid);
+int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
+
+#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
+#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
+
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+int BCMFASTPATH
+dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
+{
+ return BCME_NORESOURCE;
+}
+
+/* Flow ring's queue management functions */
+
+void /* Initialize a flow ring's queue */
+dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+ ASSERT((queue != NULL) && (max > 0));
+
+ dll_init(&queue->list);
+ queue->head = queue->tail = NULL;
+ queue->len = 0;
+ queue->max = max - 1;
+ queue->failures = 0U;
+ queue->cb = &dhd_flow_queue_overflow;
+}
+
+void /* Register an enqueue overflow callback handler */
+dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
+{
+ ASSERT(queue != NULL);
+ queue->cb = cb;
+}
+
+
+int BCMFASTPATH /* Enqueue a packet in a flow ring's queue */
+dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+ int ret = BCME_OK;
+
+ ASSERT(queue != NULL);
+
+ if (queue->len >= queue->max) {
+ queue->failures++;
+ ret = (*queue->cb)(queue, pkt);
+ goto done;
+ }
+
+ if (queue->head) {
+ FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
+ } else {
+ queue->head = pkt;
+ }
+
+ FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
+
+ queue->tail = pkt; /* at tail */
+
+ queue->len++;
+
+done:
+ return ret;
+}
+
+void * BCMFASTPATH /* Dequeue a packet from a flow ring's queue, from head */
+dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
+{
+ void * pkt;
+
+ ASSERT(queue != NULL);
+
+ pkt = queue->head; /* from head */
+
+ if (pkt == NULL) {
+ ASSERT((queue->len == 0) && (queue->tail == NULL));
+ goto done;
+ }
+
+ queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
+ if (queue->head == NULL)
+ queue->tail = NULL;
+
+ queue->len--;
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
+
+done:
+ return pkt;
+}
+
+void BCMFASTPATH /* Reinsert a dequeued packet back at the head */
+dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+ if (queue->head == NULL) {
+ queue->tail = pkt;
+ }
+
+ FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
+ queue->head = pkt;
+ queue->len++;
+}
+
+
+/* Init Flow Ring specific data structures */
+int
+dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
+{
+ uint32 idx;
+ uint32 flow_ring_table_sz;
+ uint32 if_flow_lkup_sz;
+ void * flowid_allocator;
+ flow_ring_table_t *flow_ring_table;
+ if_flow_lkup_t *if_flow_lkup = NULL;
+#ifdef PCIE_TX_DEFERRAL
+ uint32 count;
+#endif
+ void *lock = NULL;
+ unsigned long flags;
+
+ DHD_INFO(("%s\n", __FUNCTION__));
+
+	/* Construct a 16bit flowid allocator */
+ flowid_allocator = id16_map_init(dhdp->osh,
+ num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
+ if (flowid_allocator == NULL) {
+ DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* Allocate a flow ring table, comprising of requested number of rings */
+ flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
+ flow_ring_table = (flow_ring_table_t *)MALLOC(dhdp->osh, flow_ring_table_sz);
+ if (flow_ring_table == NULL) {
+ DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Initialize flow ring table state */
+ bzero((uchar *)flow_ring_table, flow_ring_table_sz);
+ for (idx = 0; idx < num_flow_rings; idx++) {
+ flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
+ flow_ring_table[idx].flowid = (uint16)idx;
+ flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
+ if (flow_ring_table[idx].lock == NULL) {
+ DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ dll_init(&flow_ring_table[idx].list);
+
+ /* Initialize the per flow ring backup queue */
+ dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
+ FLOW_RING_QUEUE_THRESHOLD);
+ }
+
+ /* Allocate per interface hash table */
+ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+ if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
+ DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
+ if (if_flow_lkup == NULL) {
+ DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Initialize per interface hash table */
+ bzero((uchar *)if_flow_lkup, if_flow_lkup_sz);
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ int hash_ix;
+ if_flow_lkup[idx].status = 0;
+ if_flow_lkup[idx].role = 0;
+ for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
+ if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
+ }
+
+#ifdef PCIE_TX_DEFERRAL
+ count = BITS_TO_LONGS(num_flow_rings);
+	dhdp->bus->delete_flow_map = kzalloc(count * sizeof(unsigned long), GFP_ATOMIC);
+ if (!dhdp->bus->delete_flow_map) {
+ DHD_ERROR(("%s: delete_flow_map alloc failure\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+
+ lock = dhd_os_spin_lock_init(dhdp->osh);
+ if (lock == NULL)
+ goto fail;
+
+ dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
+ bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ /* Now populate into dhd pub */
+ DHD_FLOWID_LOCK(lock, flags);
+ dhdp->num_flow_rings = num_flow_rings;
+ dhdp->flowid_allocator = (void *)flowid_allocator;
+ dhdp->flow_ring_table = (void *)flow_ring_table;
+ dhdp->if_flow_lkup = (void *)if_flow_lkup;
+ dhdp->flowid_lock = lock;
+ DHD_FLOWID_UNLOCK(lock, flags);
+
+ DHD_INFO(("%s done\n", __FUNCTION__));
+ return BCME_OK;
+
+fail:
+
+#ifdef PCIE_TX_DEFERRAL
+ if (dhdp->bus->delete_flow_map)
+ kfree(dhdp->bus->delete_flow_map);
+#endif
+ /* Destruct the per interface flow lkup table */
+ if (dhdp->if_flow_lkup != NULL) {
+ DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
+ }
+ if (flow_ring_table != NULL) {
+ for (idx = 0; idx < num_flow_rings; idx++) {
+ if (flow_ring_table[idx].lock != NULL)
+ dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+ }
+ MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+ }
+ id16_map_fini(dhdp->osh, flowid_allocator);
+
+ return BCME_NOMEM;
+}
+
+/* Deinit Flow Ring specific data structures */
+void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
+{
+ uint16 idx;
+ uint32 flow_ring_table_sz;
+ uint32 if_flow_lkup_sz;
+ flow_ring_table_t *flow_ring_table;
+ unsigned long flags;
+ void *lock;
+
+ DHD_INFO(("dhd_flow_rings_deinit\n"));
+
+ if (dhdp->flow_ring_table != NULL) {
+
+ ASSERT(dhdp->num_flow_rings > 0);
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ dhdp->flow_ring_table = NULL;
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
+ if (flow_ring_table[idx].active) {
+ dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
+ }
+ ASSERT(flow_queue_empty(&flow_ring_table[idx].queue));
+
+ /* Deinit flow ring queue locks before destroying flow ring table */
+ dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+ flow_ring_table[idx].lock = NULL;
+ }
+
+ /* Destruct the flow ring table */
+ flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
+ MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+ }
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+
+ /* Destruct the per interface flow lkup table */
+ if (dhdp->if_flow_lkup != NULL) {
+ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+		bzero(dhdp->if_flow_lkup, if_flow_lkup_sz);
+ DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
+ dhdp->if_flow_lkup = NULL;
+ }
+
+#ifdef PCIE_TX_DEFERRAL
+ if (dhdp->bus->delete_flow_map)
+ kfree(dhdp->bus->delete_flow_map);
+#endif
+
+ /* Destruct the flowid allocator */
+ if (dhdp->flowid_allocator != NULL)
+ dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
+
+ dhdp->num_flow_rings = 0U;
+ lock = dhdp->flowid_lock;
+ dhdp->flowid_lock = NULL;
+
+ DHD_FLOWID_UNLOCK(lock, flags);
+ dhd_os_spin_lock_deinit(dhdp->osh, lock);
+}
+
+uint8
+dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
+{
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ ASSERT(if_flow_lkup);
+ return if_flow_lkup[ifindex].role;
+}
+
+#ifdef WLTDLS
+bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
+{
+ tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ return TRUE;
+ }
+ cur = cur->next;
+ }
+ return FALSE;
+}
+#endif /* WLTDLS */
+
+/* For a given interface, search the hash table for a matching flow */
+uint16
+dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+ int hash;
+ bool ismcast = FALSE;
+ flow_hash_info_t *cur;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+#ifdef WLTDLS
+ if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
+ is_tdls_destination(dhdp, da)) {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ while (cur != NULL) {
+ if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+ cur = cur->next;
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return FLOWID_INVALID;
+ }
+#endif /* WLTDLS */
+ cur = if_flow_lkup[ifindex].fl_hash[prio];
+ if (cur) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+
+ } else {
+
+ if (ETHER_ISMULTI(da)) {
+ ismcast = TRUE;
+ hash = 0;
+ } else {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ }
+
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+
+ while (cur) {
+ if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
+ (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
+ (cur->flow_info.tid == prio))) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+ cur = cur->next;
+ }
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ return FLOWID_INVALID;
+}
+
+/* Allocate Flow ID */
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+ flow_hash_info_t *fl_hash_node, *cur;
+ if_flow_lkup_t *if_flow_lkup;
+ int hash;
+ uint16 flowid;
+ unsigned long flags;
+
+ fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
+ memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ ASSERT(dhdp->flowid_allocator != NULL);
+ flowid = id16_map_alloc(dhdp->flowid_allocator);
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ if (flowid == FLOWID_INVALID) {
+ MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
+ DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+ return FLOWID_INVALID;
+ }
+
+ fl_hash_node->flowid = flowid;
+ fl_hash_node->flow_info.tid = prio;
+ fl_hash_node->flow_info.ifindex = ifindex;
+ fl_hash_node->next = NULL;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+ /* For STA non TDLS dest we allocate entry based on prio only */
+#ifdef WLTDLS
+ if (dhdp->peer_tbl.tdls_peer_count &&
+ (is_tdls_destination(dhdp, da))) {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ if (cur) {
+ while (cur->next) {
+ cur = cur->next;
+ }
+ cur->next = fl_hash_node;
+ } else {
+ if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+ }
+ } else
+#endif /* WLTDLS */
+ if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+ } else {
+
+ /* For bcast/mcast assign first slot in in interface */
+ hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ if (cur) {
+ while (cur->next) {
+ cur = cur->next;
+ }
+ cur->next = fl_hash_node;
+ } else
+ if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+ return fl_hash_node->flowid;
+}
+
+/* Get flow ring ID, if not present try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+ uint16 id;
+ flow_ring_node_t *flow_ring_node;
+ flow_ring_table_t *flow_ring_table;
+ unsigned long flags;
+
+ DHD_INFO(("%s\n", __FUNCTION__));
+
+ if (!dhdp->flow_ring_table)
+ return BCME_ERROR;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+ id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+ if (id == FLOWID_INVALID) {
+
+ if_flow_lkup_t *if_flow_lkup;
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (!if_flow_lkup[ifindex].status)
+ return BCME_ERROR;
+
+ id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
+ if (id == FLOWID_INVALID) {
+ DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+ __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
+ return BCME_ERROR;
+ }
+
+ /* register this flowid in dhd_pub */
+ dhd_add_flowid(dhdp, ifindex, prio, da, id);
+ }
+
+ ASSERT(id < dhdp->num_flow_rings);
+
+ flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ if (flow_ring_node->active) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ *flowid = id;
+ return BCME_OK;
+ }
+ /* Init Flow info */
+ memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+ memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+ flow_ring_node->flow_info.tid = prio;
+ flow_ring_node->flow_info.ifindex = ifindex;
+ flow_ring_node->active = TRUE;
+ flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ dll_prepend(&dhdp->bus->const_flowring, &flow_ring_node->list);
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ /* Create and inform device about the new flow */
+ if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+ != BCME_OK) {
+ DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+ return BCME_ERROR;
+ }
+
+ *flowid = id;
+ return BCME_OK;
+}
+
+/* Update flowid information on the packet */
+int BCMFASTPATH
+dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
+ uint16 flowid;
+
+ if (dhd_bus_is_txmode_push(dhdp->bus))
+ return BCME_OK;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS) {
+ return BCME_BADARG;
+ }
+
+ if (!dhdp->flowid_allocator) {
+ DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
+ &flowid) != BCME_OK) {
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+ /* Tag the packet with flowid */
+ DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), flowid);
+ return BCME_OK;
+}
+
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+ int hashix;
+ bool found = FALSE;
+ flow_hash_info_t *cur, *prev;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+ cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+ if (cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ }
+
+ prev = NULL;
+ while (!found && cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ break;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ if (found) {
+ if (!prev) {
+ if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+ } else {
+ prev->next = cur->next;
+ }
+
+ /* deregister flowid from dhd_pub. */
+ dhd_del_flowid(dhdp, ifindex, flowid);
+
+ id16_map_free(dhdp->flowid_allocator, flowid);
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+ return;
+ }
+ }
+ }
+
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+ __FUNCTION__, flowid));
+}
+
+
+/* Delete all Flow rings associated with the given Interface */
+void
+dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+ uint32 id;
+ flow_ring_table_t *flow_ring_table;
+
+ DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ if (!dhdp->flow_ring_table)
+ return;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ for (id = 0; id < dhdp->num_flow_rings; id++) {
+ if (flow_ring_table[id].active &&
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+ DHD_INFO(("%s: deleting flowid %d\n",
+ __FUNCTION__, flow_ring_table[id].flowid));
+ dhd_bus_flow_ring_delete_request(dhdp->bus,
+ (void *) &flow_ring_table[id]);
+ }
+ }
+}
+
+/* Delete flow/s for given peer address */
+void
+dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
+{
+ uint32 id;
+ flow_ring_table_t *flow_ring_table;
+
+ DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ if (!dhdp->flow_ring_table)
+ return;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ for (id = 0; id < dhdp->num_flow_rings; id++) {
+ if (flow_ring_table[id].active &&
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+ (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+ DHD_INFO(("%s: deleting flowid %d\n",
+ __FUNCTION__, flow_ring_table[id].flowid));
+ dhd_bus_flow_ring_delete_request(dhdp->bus,
+ (void *) &flow_ring_table[id]);
+ }
+ }
+}
+
+/* Handle Interface ADD, DEL operations */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 op, uint8 role)
+{
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ DHD_INFO(("%s: ifindex %u op %u role is %u \n",
+ __FUNCTION__, ifindex, op, role));
+ if (!dhdp->flowid_allocator) {
+ DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+ if_flow_lkup[ifindex].role = role;
+
+ if (!(DHD_IF_ROLE_STA(role))) {
+ if_flow_lkup[ifindex].status = TRUE;
+ DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
+ __FUNCTION__, ifindex, role));
+ /* Create Mcast Flow */
+ }
+ } else if (op == WLC_E_IF_DEL) {
+ if_flow_lkup[ifindex].status = FALSE;
+ DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
+ __FUNCTION__, ifindex, role));
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/* Handle a STA interface link status update */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return BCME_BADARG;
+
+ DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+ if (status)
+ if_flow_lkup[ifindex].status = TRUE;
+ else
+ if_flow_lkup[ifindex].status = FALSE;
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ return BCME_OK;
+}
+/* Update flow priority mapping */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+ uint16 flowid;
+ flow_ring_node_t *flow_ring_node;
+
+ if (map > DHD_FLOW_PRIO_TID_MAP)
+ return BCME_BADOPTION;
+
+ /* Check if we need to change prio map */
+ if (map == dhdp->flow_prio_map_type)
+ return BCME_OK;
+
+ /* If any ring is active we cannot change priority mapping for flow rings */
+ for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+ flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+ if (flow_ring_node->active)
+ return BCME_EPERM;
+ }
+	/* Inform firmware about new mapping type */
+ if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+ return BCME_ERROR;
+
+ /* update internal structures */
+ dhdp->flow_prio_map_type = map;
+ if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+ bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ else
+ bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ return BCME_OK;
+}
+
+/* Set/Get flow ring priority map */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+ uint8 iovbuf[24];
+ if (!set) {
+ bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+ DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ *map = iovbuf[0];
+ return BCME_OK;
+ }
+ bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: failed to set fl_prio_map \n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
--- /dev/null
+/*
+ * Header file describing the flow rings DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to create, delete and manage
+ *
+ * flow rings at high level
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_flowrings.h jaganlv $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_flowrings_h_
+#define _dhd_flowrings_h_
+
+/* Max pkts held in a flow ring's backup queue */
+#define FLOW_RING_QUEUE_THRESHOLD (2048)
+
+/* Number of H2D common rings : PCIE Spec Rev? */
+#define FLOW_RING_COMMON 2
+
+#define FLOWID_INVALID (ID16_INVALID)
+#define FLOWID_RESERVED (FLOW_RING_COMMON)
+
+#define FLOW_RING_STATUS_OPEN 0
+#define FLOW_RING_STATUS_PENDING 1
+#define FLOW_RING_STATUS_CLOSED 2
+#define FLOW_RING_STATUS_DELETE_PENDING 3
+#define FLOW_RING_STATUS_FLUSH_PENDING 4
+
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
+
+#define DHD_FLOW_PRIO_AC_MAP 0
+#define DHD_FLOW_PRIO_TID_MAP 1
+
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_pkttag_fr {
+ uint16 flowid;
+ int dataoff;
+} dhd_pkttag_fr_t;
+
+#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow))
+#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset))
+
+#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid)
+#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff)
+
+/* Hashing a MacAddress for lkup into a per interface flow hash table */
+#define DHD_FLOWRING_HASH_SIZE 256
+#define DHD_FLOWRING_HASHINDEX(ea, prio) \
+ ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \
+ % DHD_FLOWRING_HASH_SIZE)
+
+#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
+#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
+#define DHD_FLOW_RING(dhdp, flowid) \
+ (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+struct flow_queue;
+
+/* Flow Ring Queue Enqueue overflow callback */
+typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
+
+typedef struct flow_queue {
+ dll_t list; /* manage a flowring queue in a dll */
+ void * head; /* first packet in the queue */
+ void * tail; /* last packet in the queue */
+ uint16 len; /* number of packets in the queue */
+ uint16 max; /* maximum number of packets, queue may hold */
+ uint32 failures; /* enqueue failures due to queue overflow */
+ flow_queue_cb_t cb; /* callback invoked on threshold crossing */
+} flow_queue_t;
+
+#define flow_queue_len(queue) ((int)(queue)->len)
+#define flow_queue_max(queue) ((int)(queue)->max)
+#define flow_queue_avail(queue) ((int)((queue)->max - (queue)->len))
+#define flow_queue_full(queue) ((queue)->len >= (queue)->max)
+#define flow_queue_empty(queue) ((queue)->len == 0)
+
+typedef struct flow_info {
+ uint8 tid;
+ uint8 ifindex;
+ char sa[ETHER_ADDR_LEN];
+ char da[ETHER_ADDR_LEN];
+} flow_info_t;
+
+typedef struct flow_ring_node {
+ dll_t list; /* manage a constructed flowring in a dll, must be at first place */
+ flow_queue_t queue;
+ bool active;
+ uint8 status;
+ uint16 flowid;
+ flow_info_t flow_info;
+ void *prot_info;
+ void *lock; /* lock for flowring access protection */
+} flow_ring_node_t;
+typedef flow_ring_node_t flow_ring_table_t;
+
+typedef struct flow_hash_info {
+ uint16 flowid;
+ flow_info_t flow_info;
+ struct flow_hash_info *next;
+} flow_hash_info_t;
+
+typedef struct if_flow_lkup {
+ bool status;
+ uint8 role; /* Interface role: STA/AP */
+ flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */
+} if_flow_lkup_t;
+
+static INLINE flow_ring_node_t *
+dhd_constlist_to_flowring(dll_t *item)
+{
+ return ((flow_ring_node_t *)item);
+}
+
+/* Exported API */
+
+/* Flow ring's queue management functions */
+extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
+extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
+extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+
+extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings);
+
+extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
+
+extern uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da);
+
+extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
+ void *pktbuf);
+
+extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
+
+extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
+ char *addr);
+
+/* Handle Interface ADD, DEL operations */
+extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 op, uint8 role);
+
+/* Handle a STA interface link status update */
+extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 status);
+extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set);
+extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map);
+
+extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex);
+#endif /* _dhd_flowrings_h_ */
#include <osl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
#include <linux/rfkill-wlan.h>
-#ifdef CUSTOMER_HW
-
#ifdef CONFIG_MACH_ODROID_4210
#include <mach/gpio.h>
#include <mach/regs-gpio.h>
#include <plat/gpio-cfg.h>
-
#include <plat/sdhci.h>
-#include <plat/devs.h> // modifed plat-samsung/dev-hsmmcX.c EXPORT_SYMBOL(s3c_device_hsmmcx) added
-
+#include <plat/devs.h>
#define sdmmc_channel s3c_device_hsmmc0
#endif
-struct wifi_platform_data {
- int (*set_power)(bool val);
- int (*set_carddetect)(bool val);
- void *(*mem_prealloc)(int section, unsigned long size);
- int (*get_mac_addr)(unsigned char *buf);
- void *(*get_country_code)(char *ccode);
-};
-
-struct resource dhd_wlan_resources = {0};
struct wifi_platform_data dhd_wlan_control = {0};
#ifdef CUSTOMER_OOB
uint bcm_wlan_get_oob_irq(void)
{
- return rockchip_wifi_get_oob_irq();
+ uint host_oob_irq = 0;
+
+ host_oob_irq = rockchip_wifi_get_oob_irq();
+
+ printk("host_oob_irq: %d \r\n", host_oob_irq);
+
+ return host_oob_irq;
}
uint bcm_wlan_get_oob_irq_flags(void)
return err;
}
+int bcm_wlan_get_mac_address(unsigned char *buf)
+{
+ int err = 0;
+
+ printk("======== %s ========\n", __FUNCTION__);
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+ err = rockchip_wifi_mac_addr(buf);
+
+ return err;
+}
+
#ifdef CONFIG_DHD_USE_STATIC_BUF
extern void *bcmdhd_mem_prealloc(int section, unsigned long size);
void* bcm_wlan_prealloc(int section, unsigned long size)
printk("======== %s ========\n", __FUNCTION__);
dhd_wlan_control.set_power = bcm_wlan_set_power;
dhd_wlan_control.set_carddetect = bcm_wlan_set_carddetect;
+ dhd_wlan_control.get_mac_addr = bcm_wlan_get_mac_address;
#ifdef CONFIG_DHD_USE_STATIC_BUF
dhd_wlan_control.mem_prealloc = bcm_wlan_prealloc;
#endif
return 0;
}
-#endif /* CUSTOMER_HW */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_ip.c 468932 2014-04-09 06:58:15Z $
+ * $Id: dhd_ip.c 502735 2014-09-16 00:53:02Z $
*/
#include <typedefs.h>
#include <osl.h>
}
}
+bool pkt_is_dhcp(osl_t *osh, void *p)
+{
+ uint8 *frame;
+ int length;
+ uint8 *pt; /* Pointer to type field */
+ uint16 ethertype;
+ struct ipv4_hdr *iph; /* IP frame pointer */
+ int ipl; /* IP frame length */
+ uint16 src_port;
+
+ ASSERT(osh && p);
+
+ frame = PKTDATA(osh, p);
+ length = PKTLEN(osh, p);
+
+ /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+ if (length < ETHER_HDR_LEN) {
+ DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+ return FALSE;
+ } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+ /* Frame is Ethernet II */
+ pt = frame + ETHER_TYPE_OFFSET;
+ } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+ !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+ pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+ } else {
+ DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ ethertype = ntoh16(*(uint16 *)pt);
+
+ /* Skip VLAN tag, if any */
+ if (ethertype == ETHER_TYPE_8021Q) {
+ pt += VLAN_TAG_LEN;
+
+ if (pt + ETHER_TYPE_LEN > frame + length) {
+ DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+ return FALSE;
+ }
+
+ ethertype = ntoh16(*(uint16 *)pt);
+ }
+
+ if (ethertype != ETHER_TYPE_IP) {
+ DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+ __FUNCTION__, ethertype, length));
+ return FALSE;
+ }
+
+ iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+ ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+ /* We support IPv4 only */
+ if ((ipl < (IPV4_OPTIONS_OFFSET + 2)) || (IP_VER(iph) != IP_VER_4)) {
+ DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+ return FALSE;
+ }
+
+ src_port = ntoh16(*(uint16 *)(pt + ETHER_TYPE_LEN + IPV4_OPTIONS_OFFSET));
+
+ return (src_port == 0x43 || src_port == 0x44);
+}
+
#ifdef DHDTCPACK_SUPPRESS
typedef struct {
- void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */
+ void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */
void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */
+ int ifidx;
+ uint8 supp_cnt;
+ dhd_pub_t *dhdp;
+ struct timer_list timer;
} tcpack_info_t;
typedef struct _tdata_psh_info_t {
return;
}
+static void dhd_tcpack_send(ulong data)
+{
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *cur_tbl = (tcpack_info_t *)data;
+ dhd_pub_t *dhdp;
+ int ifidx;
+ void* pkt;
+
+ if (!cur_tbl) {
+ return;
+ }
+
+ dhdp = cur_tbl->dhdp;
+ if (!dhdp) {
+ return;
+ }
+
+ dhd_os_tcpacklock(dhdp);
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+ pkt = cur_tbl->pkt_in_q;
+ ifidx = cur_tbl->ifidx;
+ if (!pkt) {
+ dhd_os_tcpackunlock(dhdp);
+ return;
+ }
+ cur_tbl->pkt_in_q = NULL;
+ cur_tbl->pkt_ether_hdr = NULL;
+ cur_tbl->ifidx = 0;
+ cur_tbl->supp_cnt = 0;
+ if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+ DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+ }
+
+ dhd_os_tcpackunlock(dhdp);
+
+ dhd_sendpkt(dhdp, ifidx, pkt);
+}
+
int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
{
int ret = BCME_OK;
dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
}
+ if (mode == TCPACK_SUP_HOLD) {
+ int i;
+ tcpack_sup_module_t *tcpack_sup_mod =
+ (tcpack_sup_module_t *)dhdp->tcpack_sup_module;
+ dhdp->tcpack_sup_ratio = TCPACK_SUPP_RATIO;
+ dhdp->tcpack_sup_delay = TCPACK_DELAY_TIME;
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++)
+ {
+ tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp;
+ init_timer(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
+ tcpack_sup_mod->tcpack_info_tbl[i].timer.data =
+ (ulong)&tcpack_sup_mod->tcpack_info_tbl[i];
+ tcpack_sup_mod->tcpack_info_tbl[i].timer.function = dhd_tcpack_send;
+ }
+ }
+
exit:
dhd_os_tcpackunlock(dhdp);
return ret;
dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
{
tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+ int i;
if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
goto exit;
goto exit;
}
- tcpack_sup_mod->tcpack_info_cnt = 0;
- bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) {
+ PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q,
+ TRUE);
+ tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL;
+ tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+ tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0;
+ tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0;
+ }
+ }
+ } else {
+ tcpack_sup_mod->tcpack_info_cnt = 0;
+ bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+ }
+
dhd_os_tcpackunlock(dhdp);
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
+ }
+ }
+
exit:
return;
}
bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
}
bzero(last_tdata_info, sizeof(tcpdata_info_t));
- DHD_TRACE(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+ DHD_ERROR(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
} else
/* No TCP flow with the same IP addr and TCP port is found
* in tcp_data_info_tbl. So add this flow to the table.
*/
- DHD_TRACE(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ DHD_ERROR(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
" TCP port %d %d\n",
__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
return ret;
}
+bool
+dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ uint8 *new_ether_hdr; /* Ethernet header of the new packet */
+ uint16 new_ether_type; /* Ethernet type of the new packet */
+ uint8 *new_ip_hdr; /* IP header of the new packet */
+ uint8 *new_tcp_hdr; /* TCP header of the new packet */
+ uint32 new_ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */
+ uint16 new_ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 new_tcp_hdr_len; /* TCP header length of the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int i, free_slot = TCPACK_INFO_MAXNUM;
+ bool hold = FALSE;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ goto exit;
+ }
+
+ if (dhdp->tcpack_sup_ratio == 1) {
+ goto exit;
+ }
+
+ new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+ DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+ __FUNCTION__, __LINE__, cur_framelen));
+ goto exit;
+ }
+
+ new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+ if (new_ether_type != ETHER_TYPE_IP) {
+ DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, new_ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+ new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+ if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+ DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+ goto exit;
+ }
+
+ new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+ cur_framelen -= new_ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+ /* is it an ack ? Allow only ACK flag, not to suppress others. */
+ if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+ DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+ __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+ goto exit;
+ }
+
+ new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+ new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+ /* This packet has TCP data, so just send */
+ if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+ new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+ dhd_os_tcpacklock(dhdp);
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+ tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+
+ hold = TRUE;
+
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */
+ uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+ uint32 old_ip_hdr_len, old_tcp_hdr_len;
+ uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */
+
+ if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+ if (free_slot == TCPACK_INFO_MAXNUM) {
+ free_slot = i;
+ }
+ continue;
+ }
+
+ if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+ DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n",
+ __FUNCTION__, __LINE__, i));
+ hold = FALSE;
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+
+ old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+ old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+ old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+ old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+ old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+ DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* If either of IP address or TCP port number does not match, skip. */
+ if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+ &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+ memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+ &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) {
+ continue;
+ }
+
+ old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) {
+ tcpack_info_tbl[i].supp_cnt++;
+ if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) {
+ tcpack_info_tbl[i].pkt_in_q = NULL;
+ tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+ tcpack_info_tbl[i].ifidx = 0;
+ tcpack_info_tbl[i].supp_cnt = 0;
+ hold = FALSE;
+ } else {
+ tcpack_info_tbl[i].pkt_in_q = pkt;
+ tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr;
+ tcpack_info_tbl[i].ifidx = ifidx;
+ }
+ PKTFREE(dhdp->osh, oldpkt, TRUE);
+ } else {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ dhd_os_tcpackunlock(dhdp);
+
+ if (!hold) {
+ del_timer_sync(&tcpack_info_tbl[i].timer);
+ }
+ goto exit;
+ }
+
+ if (free_slot < TCPACK_INFO_MAXNUM) {
+ /* No TCPACK packet with the same IP addr and TCP port is found
+ * in tcp_ack_info_tbl. So add this packet to the table.
+ */
+ DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+ __FUNCTION__, __LINE__, pkt, new_ether_hdr,
+ free_slot));
+
+ tcpack_info_tbl[free_slot].pkt_in_q = pkt;
+ tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr;
+ tcpack_info_tbl[free_slot].ifidx = ifidx;
+ tcpack_info_tbl[free_slot].supp_cnt = 1;
+ mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+ jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
+ tcpack_sup_mod->tcpack_info_cnt++;
+ } else {
+ DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+ __FUNCTION__, __LINE__));
+ }
+ dhd_os_tcpackunlock(dhdp);
+
+exit:
+ return hold;
+}
#endif /* DHDTCPACK_SUPPRESS */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_ip.h 458522 2014-02-27 02:26:15Z $
+ * $Id: dhd_ip.h 502735 2014-09-16 00:53:02Z $
*/
#ifndef _dhd_ip_h_
} pkt_frag_t;
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+extern bool pkt_is_dhcp(osl_t *osh, void *p);
#ifdef DHDTCPACK_SUPPRESS
#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */
+#define TCPACK_SUPP_RATIO 3
+#define TCPACK_DELAY_TIME 10 /* ms */
+
extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
-
+extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx);
/* #define DHDTCPACK_SUP_DBG */
#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
extern counter_tbl_t tack_tbl;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_linux.c 491481 2014-07-16 14:08:43Z $
+ * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
*/
#include <typedefs.h>
#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
-#include <proto/bcmudp.h>
-#include <proto/bcmdhcp.h>
#ifdef DHD_L2_FILTER
#include <proto/bcmicmp.h>
#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
+#ifdef P2PONEINT
+#include <wl_cfgp2p.h>
+#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#include <wl_android.h>
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+#include <sdaudio.h>
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
/* Maximum STA per radio */
#define DHD_MAX_STA 32
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
+
#if defined(SOFTAP_TPUT_ENHANCE)
extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
#endif /* SOFTAP_TPUT_ENHANCE */
+#ifdef SET_RPS_CPUS
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
+void custom_rps_map_clear(struct netdev_rx_queue *queue);
+#ifdef CONFIG_MACH_UNIVERSAL5433
+#define RPS_CPUS_MASK "10"
+#else
+#define RPS_CPUS_MASK "6"
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
+#endif /* SET_RPS_CPUS */
+
static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
.notifier_call = dhd_reboot_callback,
unsigned long event;
};
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+#define MAX_WLANAUDIO_BLACKLIST 4
+
+struct wlanaudio_blacklist {
+ bool is_blacklist;
+ uint32 cnt;
+ ulong txfail_jiffies;
+ struct ether_addr blacklist_addr;
+};
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
/* When Perimeter locks are deployed, any blocking calls must be preceeded
* with a PERIM UNLOCK and followed by a PERIM LOCK.
* Examples of blocking calls are: schedule_timeout(), down_interruptible(),
struct wake_lock wl_rxwake; /* Wifi rx wakelock */
struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
struct wake_lock wl_wdwake; /* Wifi wd wakelock */
-#endif
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ struct wake_lock wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
/* net_device interface lock, prevent race conditions among net_dev interface
#endif
unsigned int unit;
struct notifier_block pm_notifier;
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+ struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
+ bool is_wlanaudio_blist;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
} dhd_info_t;
#define DHDIF_FWDER(dhdif) FALSE
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
+int passive_channel_skip = 0;
+module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
+
#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+dhd_info_t *dhd_global = NULL;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
+
/* DHD Perimiter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
/* Return interface pointer */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
ASSERT(ifidx < DHD_MAX_IFS);
+
+ if (ifidx >= DHD_MAX_IFS)
+ return NULL;
+
return dhdp->info->iflist[ifidx];
}
dhdp->staid_allocator = NULL;
}
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void *staid_allocator;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+ staid_allocator = dhdp->staid_allocator;
+
+ if (!sta_pool) {
+ DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!staid_allocator) {
+ DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* clear free pool */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ id16_map_clear(staid_allocator, max_sta, 1);
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ dhd_sta_free(dhdp, sta);
+ }
+}
+
/** Find STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
DHD_IF_STA_LIST_LOCK(ifp, flags);
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
sta = dhd_sta_alloc((dhd_pub_t *)pub);
if (sta == DHD_STA_NULL) {
ASSERT(ea != NULL);
ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
DHD_IF_STA_LIST_LOCK(ifp, flags);
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
}
#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#ifdef PKT_FILTER_SUPPORT
+void
+dhd_set_packet_filter_mode(struct net_device *dev, char *command)
+{
+ dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
+
+ dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
+}
+
+int
+dhd_set_packet_filter_ports(struct net_device *dev, char *command)
+{
+ int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
+ uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
+ dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhdi->pub;
+ char iovbuf[WLC_IOCTL_SMLEN];
+
+ /* get action */
+ action = bcm_strtoul(command, &command, 0);
+ if (action > PKT_FILTER_PORTS_MAX)
+ return BCME_BADARG;
+
+ if (action == PKT_FILTER_PORTS_LOOPBACK) {
+ /* echo the loopback value if port filter is supported else error */
+ bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
+ error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (error < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
+ return error;
+ }
+
+ if (strstr(iovbuf, "pktfltr2"))
+ return bcm_strtoul(command, &command, 0);
+ else {
+ DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ if (action == PKT_FILTER_PORTS_CLEAR) {
+ /* action 0 is clear all ports */
+ dhdp->pkt_filter_ports_count = 0;
+ bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
+ }
+ else {
+ portnum = bcm_strtoul(command, &command, 0);
+ if (portnum == 0) {
+ /* no ports to add or remove */
+ return BCME_BADARG;
+ }
+
+ /* get configured ports */
+ count = dhdp->pkt_filter_ports_count;
+ ports = dhdp->pkt_filter_ports;
+
+ if (action == PKT_FILTER_PORTS_ADD) {
+ /* action 1 is add ports */
+
+ /* copy new ports */
+ while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
+ for (i = 0; i < count; i++) {
+ /* duplicate port */
+ if (portnum == ports[i])
+ break;
+ }
+ if (portnum != ports[i])
+ ports[count++] = portnum;
+ portnum = bcm_strtoul(command, &command, 0);
+ }
+ } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
+ /* action 2 is remove ports */
+ bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
+ get_count = count;
+
+ while (portnum != 0) {
+ count = 0;
+ for (i = 0; i < get_count; i++) {
+ if (portnum != get_ports[i])
+ ports[count++] = get_ports[i];
+ }
+ get_count = count;
+ bcopy(ports, get_ports, count * sizeof(uint16));
+ portnum = bcm_strtoul(command, &command, 0);
+ }
+ }
+ dhdp->pkt_filter_ports_count = count;
+ }
+ return error;
+}
+
+static void
+dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
+{
+ int error = 0;
+ wl_pkt_filter_ports_t *portlist = NULL;
+ const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
+ + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
+ char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
+ char iovbuf[pkt_filter_ports_buf_len];
+
+ DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
+ enable, dhd->in_suspend, dhd->pkt_filter_mode,
+ dhd->pkt_filter_ports_count));
+
+ bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
+ portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
+ portlist->version = WL_PKT_FILTER_PORTS_VERSION;
+ portlist->reserved = 0;
+
+ if (enable) {
+ if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
+ return;
+
+ /* enable port filter */
+ dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
+ if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
+ /* whitelist mode: FORWARD_ON_MATCH */
+ dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
+ else
+ /* blacklist mode: DISCARD_ON_MATCH */
+ dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;
+
+ portlist->count = dhd->pkt_filter_ports_count;
+ bcopy(dhd->pkt_filter_ports, portlist->ports,
+ dhd->pkt_filter_ports_count * sizeof(uint16));
+ } else {
+ /* disable port filter */
+ portlist->count = 0;
+ dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
+ dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
+ }
+
+ DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
+ portlist->count));
+
+ /* update ports */
+ bcm_mkiovar("pkt_filter_ports",
+ (char*)portlist,
+ (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
+ iovbuf, sizeof(iovbuf));
+ error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (error < 0)
+ DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));
+
+ /* update mode */
+ bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
+ sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
+ error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (error < 0)
+ DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));
+
+ return;
+}
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
void dhd_set_packet_filter(dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
int i;
DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
+
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+ dhd_enable_packet_filter_ports(dhd, value);
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
/* 1 - Enable packet filter, only allow unicast packet to send up */
/* 0 - Disable packet filter */
if (dhd_pkt_filter_enable && (!value ||
{
int i = 0;
- ASSERT(dhd);
+ if (!dhd) {
+ DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
+ return DHD_BAD_IF;
+ }
while (i < DHD_MAX_IFS) {
if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
return i;
uint buflen;
int ret;
- ASSERT(dhd && dhd->iflist[ifidx]);
- dev = dhd->iflist[ifidx]->net;
- if (!dev)
- return;
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ if (!dev)
+ return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_lock_bh(dev);
+ netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
- cnt = netdev_mc_count(dev);
+ cnt = netdev_mc_count(dev);
#else
- cnt = dev->mc_count;
+ cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
#endif
- /* Determine initial value of allmulti flag */
+ /* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
/* Send down the multicast list first. */
buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
- dhd_ifname(&dhd->pub, ifidx), cnt));
+ dhd_ifname(&dhd->pub, ifidx), cnt));
return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_lock_bh(dev);
+ netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
- netdev_for_each_mc_addr(ha, dev) {
- if (!cnt)
- break;
- memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- cnt--;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
}
#else
for (mclist = dev->mc_list; (mclist && (cnt > 0));
- cnt--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
+ cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
#endif
memset(&ioc, 0, sizeof(ioc));
DHD_PERIM_LOCK(&dhd->pub);
if (ret != BCME_OK) {
DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
- dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ goto done;
}
#ifdef PCIE_FULL_DONGLE
/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
- if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+ if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
char iovbuf[WLC_IOCTL_SMLEN];
uint32 var_int = 1;
memset(iovbuf, 0, sizeof(iovbuf));
bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
+ ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
+
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ }
}
#endif /* PCIE_FULL_DONGLE */
done:
#endif /* PROP_TXSTATUS */
+#ifdef DHD_RX_DUMP
+typedef struct {
+ uint16 type;
+ const char *str;
+} PKTTYPE_INFO;
+
+static const PKTTYPE_INFO packet_type_info[] =
+{
+ { ETHER_TYPE_IP, "IP" },
+ { ETHER_TYPE_ARP, "ARP" },
+ { ETHER_TYPE_BRCM, "BRCM" },
+ { ETHER_TYPE_802_1X, "802.1X" },
+ { ETHER_TYPE_WAI, "WAPI" },
+ { 0, ""}
+};
+
+static const char *_get_packet_type_str(uint16 type)
+{
+ int i;
+ int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
+
+ for (i = 0; i < n; i++) {
+ if (packet_type_info[i].type == type)
+ return packet_type_info[i].str;
+ }
+
+ return packet_type_info[n].str;
+}
+#endif /* DHD_RX_DUMP */
+
+#if defined(DHD_8021X_DUMP)
+void
+dhd_tx_dump(osl_t *osh, void *pkt)
+{
+ uint8 *dump_data;
+ uint16 protocol;
+
+ dump_data = PKTDATA(osh, pkt);
+ protocol = (dump_data[12] << 8) | dump_data[13];
+
+ DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));
+
+ if (protocol == ETHER_TYPE_802_1X) {
+ DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+ dump_data[14], dump_data[15], dump_data[30]));
+ }
+
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+ {
+ int i;
+ uint datalen;
+ datalen = PKTLEN(osh, pkt);
+
+ for (i = 0; i < (datalen - 4); i++) {
+ DHD_ERROR(("%02X ", dump_data[i]));
+ if ((i & 15) == 15)
+ printk("\n");
+ }
+ DHD_ERROR(("\n"));
+ }
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
+}
+#endif /* DHD_8021X_DUMP */
+
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
return BCME_ERROR;
}
-#ifdef DHDTCPACK_SUPPRESS
- /* If this packet has replaced another packet and got freed, just return */
- if (dhd_tcpack_suppress(dhdp, pktbuf))
- return ret;
-#endif /* DHDTCPACK_SUPPRESS */
-
/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
if (PKTPRIO(pktbuf) == 0)
pktsetprio(pktbuf, FALSE);
-#ifdef PCIE_FULL_DONGLE
+#if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
/*
* Lkup the per interface hash table, for a matching flowring. If one is not
* available, allocate a unique flowid and add a flowring entry.
#ifdef WLMEDIA_HTSF
dhd_htsf_addtxts(dhdp, pktbuf);
#endif
+#if defined(DHD_8021X_DUMP)
+ dhd_tx_dump(dhdp->osh, pktbuf);
+#endif
#ifdef PROP_TXSTATUS
{
if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
}
#endif /* DHD_WMF */
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ /* If this packet has been held or got freed, just return */
+ if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx))
+ return 0;
+ } else {
+ /* If this packet has replaced another packet and got freed, just return */
+ if (dhd_tcpack_suppress(&dhd->pub, pktbuf))
+ return 0;
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+
ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
done:
dhd->pub.tx_dropped++;
}
else {
- dhd->pub.tx_packets++;
- ifp->stats.tx_packets++;
- ifp->stats.tx_bytes += datalen;
+
+#ifdef PROP_TXSTATUS
+ /* tx_packets counter can be counted only when wlfc is disabled */
+ if (!dhd_wlfc_is_supported(&dhd->pub))
+#endif
+ {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ }
}
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
}
}
-#ifdef DHD_RX_DUMP
-typedef struct {
- uint16 type;
- const char *str;
-} PKTTYPE_INFO;
-
-static const PKTTYPE_INFO packet_type_info[] =
-{
- { ETHER_TYPE_IP, "IP" },
- { ETHER_TYPE_ARP, "ARP" },
- { ETHER_TYPE_BRCM, "BRCM" },
- { ETHER_TYPE_802_1X, "802.1X" },
- { ETHER_TYPE_WAI, "WAPI" },
- { 0, ""}
-};
-
-static const char *_get_packet_type_str(uint16 type)
-{
- int i;
- int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
-
- for (i = 0; i < n; i++) {
- if (packet_type_info[i].type == type)
- return packet_type_info[i].str;
- }
-
- return packet_type_info[n].str;
-}
-#endif /* DHD_RX_DUMP */
-
#ifdef DHD_WMF
bool
eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
if (ETHER_ISUCAST(eh->ether_dhost)) {
if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
- dhd_sendpkt(dhdp, ifidx, pktbuf);
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
continue;
}
} else {
protocol = (dump_data[12] << 8) | dump_data[13];
if (protocol == ETHER_TYPE_802_1X) {
- DHD_ERROR(("ETHER_TYPE_802_1X: "
+ DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
"ver %d, type %d, replay %d\n",
dump_data[14], dump_data[15],
dump_data[30]));
}
}
#endif /* WLBTAMP */
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
+ dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
+ uint datalen = PKTLEN(dhd->pub.osh, txp);
+
+ if (success) {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ } else {
+ ifp->stats.tx_dropped++;
+ }
+ }
+#endif
}
static struct net_device_stats *
#ifdef CUSTOM_DPC_CPUCORE
set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#else
+ if (dhd->pub.conf->dpc_cpucore >= 0) {
+ printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+ set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
+ }
#endif
#ifdef CUSTOM_SET_CPUCORE
dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
-
/* Run until signal received */
while (1) {
if (!binary_sema_down(tsk)) {
else
break;
}
-
complete_and_exit(&tsk->completed, 0);
}
#ifdef CUSTOM_SET_CPUCORE
dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
-
/* Run until signal received */
while (1) {
if (down_interruptible(&tsk->sema) == 0) {
else
break;
}
-
complete_and_exit(&tsk->completed, 0);
}
}
#endif
+#ifdef CONFIG_MACH_UNIVERSAL5433
+ /* old revision does not send hang message */
+ if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
+#else
if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
return OSL_ERROR(bcmerror);
}
+#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
+int dhd_deepsleep(dhd_info_t *dhd, int flag)
+{
+ char iovbuf[20];
+ uint powervar = 0;
+ dhd_pub_t *dhdp;
+ int cnt = 0;
+ int ret = 0;
+
+ dhdp = &dhd->pub;
+
+ switch (flag) {
+ case 1 : /* Deepsleep on */
+ DHD_ERROR(("dhd_deepsleep: ON\n"));
+ /* give some time to sysioc_work before deepsleep */
+ OSL_SLEEP(200);
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhdp);
+#endif /* PKT_FILTER_SUPPORT */
+ /* Disable MPC */
+ powervar = 0;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* Enable Deepsleep */
+ powervar = 1;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ break;
+
+ case 0: /* Deepsleep Off */
+ DHD_ERROR(("dhd_deepsleep: OFF\n"));
+
+ /* Disable Deepsleep */
+ for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+ powervar = 0;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0);
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
+ sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("the error of dhd deepsleep status"
+ " ret value :%d\n", ret));
+ } else {
+ if (!(*(int *)iovbuf)) {
+ DHD_ERROR(("deepsleep mode is 0,"
+ " count: %d\n", cnt));
+ break;
+ }
+ }
+ }
+
+ /* Enable MPC */
+ powervar = 1;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ break;
+ }
+ return 0;
+}
static int
dhd_stop(struct net_device *net)
exit:
if (ifidx == 0 && !dhd_download_fw_on_driverload)
wl_android_wifi_off(net);
+ else {
+ if (dhd->pub.conf->deepsleep)
+ dhd_deepsleep(dhd, 1);
+ }
dhd->pub.rxcnt_timeout = 0;
dhd->pub.txcnt_timeout = 0;
#endif
int ifidx;
int32 ret = 0;
-#ifndef WL_CFG80211
- u32 up = 0;
-#endif
printk("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
}
}
}
+ if (dhd_download_fw_on_driverload) {
+ if (dhd->pub.conf->deepsleep)
+ dhd_deepsleep(dhd, 0);
+ }
/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
goto exit;
}
dhd_set_scb_probe(&dhd->pub);
-#else
- ret = wldev_ioctl(net, WLC_UP, &up, sizeof(up), true);
- if (unlikely(ret)) {
- DHD_ERROR(("WLC_UP error (%d)\n", ret));
- }
#endif /* WL_CFG80211 */
}
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */
{
dhd_if_event_t *if_event;
-#ifdef WL_CFG80211
+#if defined(WL_CFG80211) && !defined(P2PONEINT)
if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
return BCME_OK;
#endif /* WL_CFG80211 */
+#ifdef SET_RPS_CPUS
+ custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
if (need_rtnl_lock)
unregister_netdev(ifp->net);
else
.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
+
+#ifdef P2PONEINT
+extern int wl_cfgp2p_if_open(struct net_device *net);
+extern int wl_cfgp2p_if_stop(struct net_device *net);
+
+static struct net_device_ops dhd_cfgp2p_ops_virt = {
+ .ndo_open = wl_cfgp2p_if_open,
+ .ndo_stop = wl_cfgp2p_if_stop,
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
#ifdef DEBUGGER
fs = get_fs();
filep = filp_open(logstrs_path, O_RDONLY, 0);
if (IS_ERR(filep)) {
- DHD_ERROR(("Failed to open the file logstrs.bin in %s", __FUNCTION__));
+ DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
goto fail;
}
error = vfs_stat(logstrs_path, &stat);
if (error) {
- DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
+ DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
goto fail;
}
logstrs_size = (int) stat.size;
raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
if (raw_fmts == NULL) {
- DHD_ERROR(("Failed to allocate raw_fmts memory"));
+ DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
goto fail;
}
if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
- DHD_ERROR(("Error: Log strings file read failed"));
+ DHD_ERROR(("Error: Log strings file read failed\n"));
goto fail;
}
}
fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
if (fmts == NULL) {
- DHD_ERROR(("Failed to allocate fmts memory"));
+ DHD_ERROR(("Failed to allocate fmts memory\n"));
goto fail;
}
}
dhd_conf_reset(&dhd->pub);
dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+ dhd_conf_preinit(&dhd->pub);
/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
* This is indeed a hack but we have to make it work properly before we have a better
* solution
*/
dhd_update_fw_nv_path(dhd);
+#ifndef BUILD_IN_KERNEL
+ dhd_conf_read_config(&dhd->pub, dhd->conf_path);
+#endif
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
mutex_init(&dhd->dhd_net_if_mutex);
#ifdef BCMSDIO
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
#elif defined(BCMPCIE)
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#else
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMSDIO */
dhd->dhd_state = dhd_state;
dhd_found++;
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+ dhd_global = dhd;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
return &dhd->pub;
fail:
const char *fw = NULL;
const char *nv = NULL;
const char *conf = NULL;
- char firmware[100] = {0};
+ char firmware[100] = {0};
char nvram[100] = {0};
- char config[100] = "/system/etc/firmware/config.txt";
+ //char config[100] = "";
wifi_adapter_info_t *adapter = dhdinfo->adapter;
/* set default firmware and nvram path for built-in type driver */
// if (!dhd_download_fw_on_driverload) {
- rkwifi_set_firmware(firmware, nvram);
+ rkwifi_set_firmware(firmware, nvram);
#ifdef CONFIG_BCMDHD_FW_PATH
- fw = CONFIG_BCMDHD_FW_PATH;
+ fw = CONFIG_BCMDHD_FW_PATH;
#else
- fw = firmware;
+ fw = firmware;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
- nv = CONFIG_BCMDHD_NVRAM_PATH;
+ nv = CONFIG_BCMDHD_NVRAM_PATH;
#else
- nv = nvram;
+ nv = nvram;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
-#ifdef CONFIG_BCMDHD_CONFIG_PATH
- conf = CONFIG_BCMDHD_CONFIG_PATH;
-#else
- conf = config;
-#endif /* CONFIG_BCMDHD_CONFIG_PATH */
// }
/* check if we need to initialize the path */
return FALSE;
}
if (dhdinfo->conf_path[0] == '\0') {
- DHD_ERROR(("config path not found\n"));
- return FALSE;
+ dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
}
#endif /* BCMEMBEDIMAGE */
}
-#ifdef EXYNOS5433_PCIE_WAR
-extern int enum_wifi;
-#endif /* EXYNOS5433_PCIE_WAR */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
dhd->fw_path, dhd->nv_path, dhd->conf_path);
if (ret < 0) {
-#ifdef EXYNOS5433_PCIE_WAR
- enum_wifi = 0;
-#endif /* EXYNOS5433_PCIE_WAR */
DHD_ERROR(("%s: failed to download firmware %s\n",
__FUNCTION__, dhd->fw_path));
DHD_PERIM_UNLOCK(dhdp);
return ret;
}
-#ifdef EXYNOS5433_PCIE_WAR
- enum_wifi = 1;
-#endif /* EXYNOS5433_PCIE_WAR */
}
if (dhd->pub.busstate != DHD_BUS_LOAD) {
DHD_PERIM_UNLOCK(dhdp);
DHD_PERIM_UNLOCK(dhdp);
return ret;
}
-#if defined(OOB_INTR_ONLY)
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
/* Host registration for OOB interrupt */
if (dhd_bus_oob_intr_register(dhdp)) {
/* deactivate timer and wait for the handler to finish */
-
+#if !defined(BCMPCIE_OOB_HOST_WAKE)
DHD_GENERAL_LOCK(&dhd->pub, flags);
dhd->wd_timer_valid = FALSE;
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
del_timer_sync(&dhd->timer);
- DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
dhd_os_sdunlock(dhdp);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
return -ENODEV;
}
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_os_sdlock(dhdp);
+ dhd_bus_oob_intr_set(dhdp, TRUE);
+#else
/* Enable oob at firmware */
dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif
#ifdef PCIE_FULL_DONGLE
{
return ret;
#else
return 0;
-#endif
+#endif
}
}
}
return 0;
}
-#endif
+#endif
+
+#ifdef SUPPORT_AP_POWERSAVE
+#define RXCHAIN_PWRSAVE_PPS 10
+#define RXCHAIN_PWRSAVE_QUIET_TIME 10
+#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
+int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
+{
+ char iovbuf[128];
+ int32 pps = RXCHAIN_PWRSAVE_PPS;
+ int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
+ int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
+
+ if (enable) {
+ bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("Failed to enable AP power save\n"));
+ }
+ bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("Failed to set pps\n"));
+ }
+ bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
+ 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("Failed to set quiet time\n"));
+ }
+ bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
+ 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("Failed to set stas assoc check\n"));
+ }
+ } else {
+ bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("Failed to disable AP power save\n"));
+ }
+ }
+
+ return 0;
+}
+#endif /* SUPPORT_AP_POWERSAVE */
+
+
#if defined(READ_CONFIG_FROM_FILE)
#include <linux/fs.h>
#include <linux/ctype.h>
memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
return ret;
}
+ } else if (!strcmp(name, "lpc")) {
+ int ret = 0;
+ char buf[32];
+ uint iovlen;
+ var_int = (int)simple_strtol(value, NULL, 0);
+ if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
+ }
+ iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
+ }
+ return ret;
+ } else if (!strcmp(name, "vht_features")) {
+ int ret = 0;
+ char buf[32];
+ uint iovlen;
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
+ }
+ iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
+ }
+ return ret;
} else {
uint iovlen;
char iovbuf[WLC_IOCTL_SMLEN];
char eventmask[WL_EVENTING_MASK_LEN];
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
uint32 buf_key_b4_m4 = 1;
+#ifndef WL_CFG80211
+ u32 up = 0;
+#endif
uint8 msglen;
- eventmsgs_ext_t *eventmask_msg;
- char iov_buf[WLC_IOCTL_SMLEN];
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ char* iov_buf = NULL;
int ret2 = 0;
#ifdef WLAIBSS
aibss_bcn_force_config_t bcn_config;
#if defined(CUSTOM_AMPDU_RELEASE)
int32 ampdu_release = 0;
#endif
+#if defined(CUSTOM_AMSDU_AGGSF)
+ int32 amsdu_aggsf = 0;
+#endif
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
#ifdef BCMCCX
uint32 ccx = 1;
#endif
-
+#ifdef SOFTAP_UAPSD_OFF
+ uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
uint32 apsta = 1; /* Enable APSTA mode */
#elif defined(SOFTAP_AND_GC)
ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
if (ret < 0) {
DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
- return BCME_NOTUP;
+ ret = BCME_NOTUP;
+ goto done;
}
memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
} else {
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
FALSE, 0)) < 0) {
DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
- return BCME_NOTUP;
+ ret = BCME_NOTUP;
+ goto done;
}
/* Update public MAC address after reading from Firmware */
memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
__FUNCTION__, ret));
- return 0;
+ goto done;
}
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
(op_mode == DHD_FLAG_HOSTAP_MODE)) {
DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
}
#endif
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif
+#ifdef SOFTAP_UAPSD_OFF
+ bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
+#endif /* SOFTAP_UAPSD_OFF */
} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
(op_mode == DHD_FLAG_MFG_MODE)) {
#if defined(ARP_OFFLOAD_SUPPORT)
}
#if defined(SOFTAP_AND_GC)
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
- (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
- DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
- }
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+ (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+ }
#endif
memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
ETHER_SET_LOCALADDR(&p2p_ea);
}
}
#else
- (void)concurrent_mode;
+ (void)concurrent_mode;
#endif
}
DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
}
#endif /* DHD_ENABLE_LPC */
+ dhd_conf_set_lpc(dhd);
/* Set PowerSave mode */
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
}
#endif /* defined(BCMSDIO) */
- dhd_conf_set_glom(dhd);
+ dhd_conf_set_bus_txglom(dhd);
/* Setup timeout if Beacons are lost and roam is off to report link down */
bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
}
#endif /* USE_WL_TXBF */
+ dhd_conf_set_txbf(dhd);
#ifdef USE_WL_FRAMEBURST
/* Set frameburst to value */
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
}
#endif /* USE_WL_FRAMEBURST */
+ dhd_conf_set_frameburst(dhd);
#ifdef DHD_SET_FW_HIGHSPEED
/* Set ack_ratio */
bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
}
}
#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+ dhd_conf_set_ampdu_ba_wsize(dhd);
+ iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
#ifdef WLAIBSS
/* Configure custom IBSS beacon transmission */
if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
bcn_config.len = sizeof(bcn_config);
bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
- sizeof(aibss_bcn_force_config_t), iov_buf, sizeof(iov_buf));
+ sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
- sizeof(iov_buf), TRUE, 0)) < 0) {
+ WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
__FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
AIBSS_BCN_FLOOD_DUR, ret));
}
}
#endif /* CUSTOM_AMPDU_MPDU */
- dhd_conf_set_ampdu_ba_wsize(dhd);
#if defined(CUSTOM_AMPDU_RELEASE)
ampdu_release = CUSTOM_AMPDU_RELEASE;
}
#endif /* CUSTOM_AMPDU_RELEASE */
+#if defined(CUSTOM_AMSDU_AGGSF)
+ amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
+ if (amsdu_aggsf != 0) {
+ bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+ }
+ }
+#endif /* CUSTOM_AMSDU_AGGSF */
+
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
/* Read 4-way handshake requirements */
if (dhd_use_idsup == 1) {
#ifdef WLAIBSS
setbit(eventmask, WLC_E_AIBSS_TXFAIL);
#endif /* WLAIBSS */
+#ifdef CUSTOMER_HW10
+ clrbit(eventmask, WLC_E_TRACE);
+#else
setbit(eventmask, WLC_E_TRACE);
-
+#endif
/* Write updated Event mask */
bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
if (eventmask_msg == NULL) {
DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
- return BCME_NOMEM;
+ ret = BCME_NOMEM;
+ goto done;
}
bzero(eventmask_msg, msglen);
eventmask_msg->ver = EVENTMSGS_VER;
eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
/* Read event_msgs_ext mask */
- bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, sizeof(iov_buf));
- ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, sizeof(iov_buf), FALSE, 0);
+ bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
if (ret2 != BCME_UNSUPPORTED)
ret = ret2;
if (ret2 == 0) { /* event_msgs_ext must be supported */
eventmask_msg->command = EVENTMSGS_SET_MASK;
eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
- msglen, iov_buf, sizeof(iov_buf));
+ msglen, iov_buf, WLC_IOCTL_SMLEN);
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
- iov_buf, sizeof(iov_buf), TRUE, 0)) < 0) {
+ iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
- kfree(eventmask_msg);
goto done;
}
} else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
- kfree(eventmask_msg);
goto done;
} /* unsupported is ok */
- kfree(eventmask_msg);
dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
sizeof(scan_assoc_time), TRUE, 0);
if (arpoe && !ap_fw_loaded)
#else
if (arpoe)
-#endif
+#endif
{
dhd_arp_offload_enable(dhd, TRUE);
dhd_arp_offload_set(dhd, dhd_arp_mode);
bcmstrtok(&ptr, "\n", 0);
/* Print fw version info */
DHD_ERROR(("Firmware version = %s\n", buf));
-#if defined(BCMSDIO)
dhd_set_version_info(dhd, buf);
-#endif /* defined(BCMSDIO) */
}
#if defined(BCMSDIO)
- dhd_txglom_enable(dhd, TRUE);
+ dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
#endif /* defined(BCMSDIO) */
+ dhd_conf_set_disable_proptx(dhd);
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
if (disable_proptx ||
#ifdef WL11U
dhd_interworking_enable(dhd);
#endif /* WL11U */
+#ifndef WL_CFG80211
+ dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
+#endif
done:
+
+ if (eventmask_msg)
+ kfree(eventmask_msg);
+ if (iov_buf)
+ kfree(iov_buf);
+
return ret;
}
ASSERT(dev);
if (netif_running(dev)) {
- DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
+ DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
return BCME_NOTDOWN;
}
switch (ndo_work->event) {
case NETDEV_UP:
- DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
+ DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
ret = dhd_ndo_enable(pub, TRUE);
if (ret < 0) {
DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
net = ifp->net;
ASSERT(net && (ifp->idx == ifidx));
+#ifndef P2PONEINT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
ASSERT(!net->open);
net->get_stats = dhd_get_stats;
ASSERT(!net->netdev_ops);
net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+#else
+ net->netdev_ops = &dhd_cfgp2p_ops_virt;
+#endif /* P2PONEINT */
/* Ok, link into the network layer... */
if (ifidx == 0) {
goto fail;
}
+#ifdef SET_RPS_CPUS
+ err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
+ if (err < 0)
+		DHD_ERROR(("%s : custom_rps_map_set failed. error : %d\n", __FUNCTION__, err));
+#endif /* SET_RPS_CPUS */
+
printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
dhd_bus_stop(dhd->pub.bus, TRUE);
}
-#if defined(OOB_INTR_ONLY)
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
dhd_bus_oob_intr_unregister(dhdp);
#endif
}
if (!dhd)
return;
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+ dhd_global = NULL;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
dhd->pub.up = 0;
}
if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+ dhd_bus_detach(dhdp);
#ifdef PCIE_FULL_DONGLE
dhd_flow_rings_deinit(dhdp);
#endif
- dhd_bus_detach(dhdp);
if (dhdp->prot)
dhd_prot_detach(dhdp);
*/
if (ifp->net->reg_state == NETREG_UNINITIALIZED)
free_netdev(ifp->net);
- else
+ else {
+#ifdef SET_RPS_CPUS
+ custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
unregister_netdev(ifp->net);
+ }
ifp->net = NULL;
#ifdef DHD_WMF
dhd_wmf_cleanup(dhdp, 0);
wake_lock_destroy(&dhd->wl_rxwake);
wake_lock_destroy(&dhd->wl_ctrlwake);
wake_lock_destroy(&dhd->wl_wdwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
}
+
#ifdef DHDTCPACK_SUPPRESS
/* This will free all MEM allocated for TCPACK SUPPRESS */
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
}
}
+void
+dhd_clear(dhd_pub_t *dhdp)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ int i;
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ ptr = dhdp->reorder_bufs[i];
+
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
+
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ dhdp->reorder_bufs[i] = NULL;
+ }
+ }
+
+ dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+ }
+}
+
static void
dhd_module_cleanup(void)
{
gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
- if (buf == NULL && kmalloc_if_fail)
- buf = kmalloc(size, flags);
+ if (buf == NULL) {
+ DHD_ERROR(("%s: failed to alloc memory, section: %d,"
+ " size: %dbytes\n", __FUNCTION__, section, size));
+ if (kmalloc_if_fail)
+ buf = kmalloc(size, flags);
+ }
return buf;
}
}
#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+static int
+dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data)
+{
+ int cnt;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ struct ether_addr *addr = &event->addr;
+ uint32 type = ntoh32_ua((void *)&event->event_type);
+
+ switch (type) {
+ case WLC_E_TXFAIL:
+ if (addr != NULL)
+ bcm_ether_ntoa(addr, eabuf);
+ else
+ return (BCME_ERROR);
+
+ for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
+ if (dhd->wlanaudio_blist[cnt].is_blacklist)
+ break;
+
+ if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
+ addr, ETHER_ADDR_LEN)) {
+ /* Mac address is Same */
+ dhd->wlanaudio_blist[cnt].cnt++;
+
+ if (dhd->wlanaudio_blist[cnt].cnt < 15) {
+ /* black list is false */
+ if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
+ (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
+ < 100)) {
+ dhd->wlanaudio_blist[cnt].is_blacklist = true;
+ dhd->is_wlanaudio_blist = true;
+ }
+ } else {
+ if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
+ (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
+ > 100)) {
+
+ bzero(&dhd->wlanaudio_blist[cnt],
+ sizeof(struct wlanaudio_blacklist));
+ }
+ }
+ break;
+ } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
+ (!dhd->wlanaudio_blist[cnt].cnt)) {
+ bcopy(addr,
+ (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
+ ETHER_ADDR_LEN);
+ dhd->wlanaudio_blist[cnt].cnt++;
+ dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;
+
+ bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
+ break;
+ }
+ }
+ break;
+ case WLC_E_AUTH :
+ case WLC_E_AUTH_IND :
+ case WLC_E_DEAUTH :
+ case WLC_E_DEAUTH_IND :
+ case WLC_E_ASSOC:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC:
+ case WLC_E_REASSOC_IND:
+ case WLC_E_DISASSOC:
+ case WLC_E_DISASSOC_IND:
+ {
+ int bl_cnt = 0;
+
+ if (addr != NULL)
+ bcm_ether_ntoa(addr, eabuf);
+ else
+ return (BCME_ERROR);
+
+ for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
+ if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
+ addr, ETHER_ADDR_LEN)) {
+ /* Mac address is Same */
+ if (dhd->wlanaudio_blist[cnt].is_blacklist) {
+ /* black list is true */
+ bzero(&dhd->wlanaudio_blist[cnt],
+ sizeof(struct wlanaudio_blacklist));
+ }
+ }
+ }
+
+ for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
+ if (dhd->wlanaudio_blist[cnt].is_blacklist)
+ bl_cnt++;
+ }
+
+ if (!bl_cnt)
+ {
+ dhd->is_wlanaudio_blist = false;
+ }
+
+ break;
+ }
+ }
+ return BCME_OK;
+}
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event, void **data)
{
int bcmerror = 0;
+
ASSERT(dhd != NULL);
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+ bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);
+
+ if (bcmerror != BCME_OK)
+ return (bcmerror);
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
#ifdef SHOW_LOGTRACE
- bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
- bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */
if (bcmerror != BCME_OK)
}
else {
/* Could not allocate a sk_buf */
- DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+ DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
}
break;
} /* case WLC_E_BTA_HCI_EVENT */
}
else {
/* Could not allocate a sk_buf */
- DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+ DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
}
}
#endif /* LOG_INTO_TCPDUMP */
}
}
+
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
int ret = 0;
#endif
return 0;
}
+
+int dhd_os_check_wakelock_all(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+ KERNEL_VERSION(2, 6, 36)))
+ dhd_info_t *dhd;
+
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+ /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+ if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+ wake_lock_active(&dhd->wl_wdwake) ||
+ wake_lock_active(&dhd->wl_rxwake) ||
+ wake_lock_active(&dhd->wl_ctrlwake))) {
+ return 1;
+ }
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+ return 1;
+#endif
+ return 0;
+}
+
int net_os_wake_unlock(struct net_device *dev)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
return ret;
}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ int ret = 0;
+
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
+#endif
+ }
+ return ret;
+}
+
+int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ int ret = 0;
+
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ /* if wl_intrwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_intrwake)) {
+ wake_unlock(&dhd->wl_intrwake);
+ }
+#endif
+ }
+ return ret;
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
* by a paired function call to dhd_wakelock_restore. returns current wakelock counter
*/
return pub->up;
}
-#if defined(BCMSDIO)
/* function to collect firmware, chip id and chip version info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
"\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
}
-#endif /* defined(BCMSDIO) */
+
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
int ifidx;
}
#endif /* DHD_L2_FILTER */
+#ifdef SET_RPS_CPUS
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
+{
+ struct rps_map *old_map, *map;
+ cpumask_var_t mask;
+ int err, cpu, i;
+ static DEFINE_SPINLOCK(rps_map_lock);
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
+ return err;
+ }
+
+ map = kzalloc(max_t(unsigned int,
+ RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+ GFP_KERNEL);
+ if (!map) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_cpu(cpu, mask)
+ map->cpus[i++] = cpu;
+
+ if (i)
+ map->len = i;
+ else {
+ kfree(map);
+ DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
+ map = NULL;
+ }
+
+ spin_lock(&rps_map_lock);
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
+ rcu_assign_pointer(queue->rps_map, map);
+ spin_unlock(&rps_map_lock);
+
+ if (map)
+ static_key_slow_inc(&rps_needed);
+ if (old_map) {
+ kfree_rcu(old_map, rcu);
+ static_key_slow_dec(&rps_needed);
+ }
+ free_cpumask_var(mask);
+
+	DHD_INFO(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map ? map->len : 0));
+	return map ? map->len : 0;
+}
+
+void custom_rps_map_clear(struct netdev_rx_queue *queue)
+{
+ struct rps_map *map;
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ map = rcu_dereference_protected(queue->rps_map, 1);
+ if (map) {
+ RCU_INIT_POINTER(queue->rps_map, NULL);
+ kfree_rcu(map, rcu);
+ DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
+ }
+}
+#endif /* SET_RPS_CPUS */
+
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+void
+SDA_setSharedMemory4Send(unsigned int buffer_id,
+ unsigned char *buffer, unsigned int buffer_size,
+ unsigned int packet_size, unsigned int headroom_size)
+{
+ dhd_info_t *dhd = dhd_global;
+
+ sda_packet_length = packet_size;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+}
+
+void
+SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
+{
+ dhd_info_t *dhd = dhd_global;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+}
+
+
+unsigned long long
+SDA_getTsf(unsigned char vif_id)
+{
+ dhd_info_t *dhd = dhd_global;
+ uint64 tsf_val;
+ char buf[WLC_IOCTL_SMLEN];
+ int ifidx = 0;
+
+ struct tsf {
+ uint32 low;
+ uint32 high;
+ } tsf_buf;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (vif_id == 0) /* wlan0 tsf */
+ ifidx = dhd_ifname2idx(dhd, "wlan0");
+ else if (vif_id == 1) /* p2p0 tsf */
+ ifidx = dhd_ifname2idx(dhd, "p2p0");
+
+ bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));
+
+ if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
+ DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
+ return 0;
+ }
+
+ memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+ tsf_val = (uint64)tsf_buf.high;
+ DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
+ __FUNCTION__, tsf_buf.high, tsf_buf.low));
+
+ return ((tsf_val << 32) | tsf_buf.low);
+}
+EXPORT_SYMBOL(SDA_getTsf);
+
+unsigned int
+SDA_syncTsf(void)
+{
+ dhd_info_t *dhd = dhd_global;
+ int tsf_sync = 1;
+ char iovbuf[WLC_IOCTL_SMLEN];
+
+ bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ return 0;
+}
+
+extern struct net_device *wl0dot1_dev;
+
+void
+BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
+{
+ struct sk_buff *skb;
+ sda_packet_t *shm_packet = packet;
+ dhd_info_t *dhd = dhd_global;
+ int cnt;
+
+ static unsigned int cnt_t = 1;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+
+ if (dhd->is_wlanaudio_blist) {
+ for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
+ if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
+ if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
+ shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
+ return;
+ }
+ }
+ }
+
+ if ((cnt_t % 10000) == 0)
+ cnt_t = 0;
+
+ cnt_t++;
+
+ /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
+#define TXOFF 26
+	skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
+	if (skb == NULL) return;
+	skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
+ skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
+ skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
+
+ /* p2p_net */
+ skb->dev = wl0dot1_dev;
+ shm_packet->txTsf = 0x0;
+ shm_packet->rxTsf = 0x0;
+ memcpy(skb->data, &shm_packet->headroom,
+ sda_packet_length - OFFSETOF(sda_packet_t, headroom));
+ shm_packet->desc.ready_to_copy = 0;
+
+ dhd_start_xmit(skb, skb->dev);
+}
+
+void
+SDA_registerCallback4Recv(unsigned char *pBufferTotal,
+ unsigned int BufferTotalSize)
+{
+ dhd_info_t *dhd = dhd_global;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+}
+
+
+void
+SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
+ unsigned int BufferTotalSize,
+ unsigned int BufferUnitSize,
+ unsigned int Headroomsize)
+{
+ dhd_info_t *dhd = dhd_global;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+}
+
+
+void
+SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
+{
+ dhd_info_t *dhd = dhd_global;
+
+ ASSERT(dhd);
+ if (dhd == NULL)
+ return;
+}
+
+EXPORT_SYMBOL(SDA_setSharedMemory4Send);
+EXPORT_SYMBOL(SDA_registerCallback4SendDone);
+EXPORT_SYMBOL(SDA_syncTsf);
+EXPORT_SYMBOL(SDA_function4Send);
+EXPORT_SYMBOL(SDA_registerCallback4Recv);
+EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
+EXPORT_SYMBOL(SDA_function4RecvDone);
+
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
void *dhd_get_pub(struct net_device *dev)
{
dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
extern int bcm_wlan_set_plat_data(void);
#endif /* CUSTOMER_HW */
-struct wifi_platform_data {
- int (*set_power)(bool val);
- int (*set_carddetect)(bool val);
- void *(*mem_prealloc)(int section, unsigned long size);
- int (*get_mac_addr)(unsigned char *buf);
- void *(*get_country_code)(char *ccode);
-};
-
#define WIFI_PLAT_NAME "bcmdhd_wlan"
#define WIFI_PLAT_NAME2 "bcm4329_wlan"
#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
#if !defined(CONFIG_DTS)
#if defined(DHD_OF_SUPPORT)
static bool dts_enabled = TRUE;
-extern struct resource dhd_wlan_resources;
extern struct wifi_platform_data dhd_wlan_control;
#else
static bool dts_enabled = FALSE;
struct resource dhd_wlan_resources = {0};
struct wifi_platform_data dhd_wlan_control = {0};
-#endif /* CONFIG_OF && !defined(CONFIG_ARCH_MSM) */
+#endif /* !defined(DHD_OF_SUPPORT) */
#endif /* !defind(CONFIG_DTS) */
static int dhd_wifi_platform_load(void);
}
-#if defined(CUSTOMER_HW)
-int rockchip_wifi_mac_addr(unsigned char *buf);
-#endif
int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
{
- //struct wifi_platform_data *plat_data;
+ struct wifi_platform_data *plat_data;
DHD_ERROR(("%s\n", __FUNCTION__));
-
-#if defined(CUSTOMER_HW)
- return rockchip_wifi_mac_addr(buf);
-#else
if (!buf || !adapter || !adapter->wifi_plat_data)
return -EINVAL;
plat_data = adapter->wifi_plat_data;
return plat_data->get_mac_addr(buf);
}
return -EOPNOTSUPP;
-#endif
}
void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
return NULL;
}
+#ifndef CUSTOMER_HW
static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
{
struct resource *resource;
{},
};
#endif /* CONFIG_DTS */
+
static struct platform_driver wifi_platform_dev_driver = {
.probe = wifi_plat_dev_drv_probe,
.remove = wifi_plat_dev_drv_remove,
return FALSE;
}
+#endif
static int wifi_ctrlfunc_register_drv(void)
{
- int err = 0;
- struct device *dev1, *dev2;
wifi_adapter_info_t *adapter;
+#ifndef CUSTOMER_HW
+ int err = 0;
+ struct device *dev1, *dev2;
dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+#endif
-#if !defined(CONFIG_DTS)
+#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
if (!dts_enabled) {
if (dev1 == NULL && dev2 == NULL) {
DHD_ERROR(("no wifi platform data, skip\n"));
dhd_wifi_platdata->num_adapters = 1;
dhd_wifi_platdata->adapters = adapter;
+#ifndef CUSTOMER_HW
if (dev1) {
err = platform_driver_register(&wifi_platform_dev_driver);
if (err) {
return err;
}
}
+#endif
#if !defined(CONFIG_DTS)
if (dts_enabled) {
#endif /* !defined(CONFIG_DTS) */
-#ifdef CONFIG_DTS
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
#endif /* CONFIG_DTS */
void wifi_ctrlfunc_unregister_drv(void)
{
- struct device *dev1, *dev2;
-#ifdef CONFIG_DTS
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
DHD_ERROR(("unregister wifi platform drivers\n"));
platform_driver_unregister(&wifi_platform_dev_driver);
#else
+#ifndef CUSTOMER_HW
+ struct device *dev1, *dev2;
dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
if (!dts_enabled)
if (dev1 == NULL && dev2 == NULL)
return;
-
+#endif
DHD_ERROR(("unregister wifi platform drivers\n"));
+#ifndef CUSTOMER_HW
if (dev1)
platform_driver_unregister(&wifi_platform_dev_driver);
if (dev2)
platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+#endif
if (dts_enabled) {
wifi_adapter_info_t *adapter;
adapter = &dhd_wifi_platdata->adapters[0];
dhd_wifi_platdata = NULL;
}
+#ifndef CUSTOMER_HW
static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
{
dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
.name = WIFI_PLAT_EXT,
}
};
+#endif
int dhd_wifi_platform_register_drv(void)
{
int err = 0;
+#ifndef CUSTOMER_HW
struct device *dev;
/* register Broadcom wifi platform data driver if multi-chip is enabled,
return -ENXIO;
}
err = platform_driver_register(&dhd_wifi_platform_dev_driver);
- } else {
+ } else
+#endif
+ {
err = wifi_ctrlfunc_register_drv();
/* no wifi ctrl func either, load bus directly and ignore this error */
void dhd_wifi_platform_unregister_drv(void)
{
+#ifndef CUSTOMER_HW
if (cfg_multichip)
platform_driver_unregister(&dhd_wifi_platform_dev_driver);
else
+#endif
wifi_ctrlfunc_unregister_drv();
}
end:
if (err)
wl_android_exit();
+#if !defined(MULTIPLE_SUPPLICANT)
else
wl_android_post_init();
+#endif
return err;
}
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_msgbuf.c 490973 2014-07-14 12:32:56Z $
+ * $Id: dhd_msgbuf.c 504484 2014-09-24 10:11:20Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <pcie_core.h>
#include <bcmpcie.h>
+#include <dhd_pcie.h>
+#include <dhd_ip.h>
+
+/*
+ * PCIE D2H DMA Complete Sync Modes
+ *
+ * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
+ * Host system memory. A WAR using one of 3 approaches is needed:
+ * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
+ * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
+ * writes in the last word of each work item. Each work item has a seqnum
+ * number = sequence num % 253.
+ * 3. Read Barrier: Dongle does a host memory read access prior to posting an
+ * interrupt.
+ * Host does not participate with option #3, other than reserving a host system
+ * memory location for the dongle to read.
+ */
+#define PCIE_D2H_SYNC
+#define PCIE_D2H_SYNC_WAIT_TRIES 1024
+#define PCIE_D2H_SYNC_BZERO /* bzero a message before updating the RD offset */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
#define IOCTL_HDR_LEN 12
#endif /* TXP_FLUSH_NITEMS */
ring_mem_t *ringmem;
ring_state_t *ringstate;
+#if defined(PCIE_D2H_SYNC)
+ uint32 seqnum;
+#endif /* PCIE_D2H_SYNC */
+ void *secdma;
} msgbuf_ring_t;
+#if defined(PCIE_D2H_SYNC)
+/* Custom callback attached based upon D2H DMA Sync mode used in dongle. */
+typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+#endif /* PCIE_D2H_SYNC */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
uint32 d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */
dhd_mem_map_t d2h_dma_readindx_buf; /* For holding dma ringupd buf - completion read */
+#if defined(PCIE_D2H_SYNC)
+ d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+ ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
+ ulong d2h_sync_wait_tot; /* total wait loops */
+#endif /* PCIE_D2H_SYNC */
dhd_dmaxfer_t dmaxfer;
bool dmaxfer_in_progress;
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+static void dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen);
static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
- NULL,
+ dhd_prot_noop, /* 0 is invalid message type */
dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
NULL,
NULL,
};
+
+#if defined(PCIE_D2H_SYNC)
+
+/*
+ * D2H DMA to completion callback handlers. Based on the mode advertised by the
+ * dongle through the PCIE shared region, the appropriate callback will be
+ * registered in the proto layer to be invoked prior to precessing any message
+ * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
+ * does not require host participation, then a noop callback handler will be
+ * bound that simply returns the msgtype.
+ */
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum,
+ uint32 tries, uchar *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot);
+
+/* Debug print a livelock avert by dropping a D2H message */
+static void
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, uint32 tries,
+ uchar *msg, int msglen)
+{
+ DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>\n",
+	    dhd, seqnum, seqnum % D2H_EPOCH_MODULO, tries,
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot));
+ prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
+}
+
+/* Sync on a D2H DMA to complete using SEQNUM mode */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ uint32 tries;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+ volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(msglen == RING_LEN_ITEMS(ring));
+
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ uint32 msg_seqnum = *marker;
+ if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
+ ring->seqnum++; /* next expected sequence number */
+ goto dma_completed;
+ }
+
+ if (tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+
+ dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen);
+
+ ring->seqnum++; /* skip this message ... leak of a pktid */
+ return 0; /* invalid msgtype 0 -> noop callback */
+
+dma_completed:
+
+ prot->d2h_sync_wait_tot += tries;
+ return msg->msg_type;
+}
+
+/* Sync on a D2H DMA to complete using XORCSUM mode */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ uint32 tries;
+ uint32 prot_checksum = 0; /* computed checksum */
+ int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+ uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(msglen == RING_LEN_ITEMS(ring));
+
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
+ if (prot_checksum == 0U) { /* checksum is OK */
+ if (msg->epoch == ring_seqnum) {
+ ring->seqnum++; /* next expected sequence number */
+ goto dma_completed;
+ }
+ }
+
+ if (tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+
+ dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen);
+
+ ring->seqnum++; /* skip this message ... leak of a pktid */
+ return 0; /* invalid msgtype 0 -> noop callback */
+
+dma_completed:
+
+ prot->d2h_sync_wait_tot += tries;
+ return msg->msg_type;
+}
+
+/* Do not sync on a D2H DMA */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ return msg->msg_type;
+}
+
+/* Initialize the D2H DMA Sync mode, per D2H ring seqnum and dhd stats */
+static void
+dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot)
+{
+ prot->d2h_sync_wait_max = 0UL;
+ prot->d2h_sync_wait_tot = 0UL;
+
+ prot->d2hring_tx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_rx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_ctrl_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+
+ if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+ else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+ else
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+}
+
+#endif /* PCIE_D2H_SYNC */
+
/*
* +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
* and the metadata may be retrieved using the previously allocated packet id.
* +---------------------------------------------------------------------------+
*/
-#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
+#define MAX_PKTID_ITEMS (8192) /* Maximum number of pktids supported */
typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle,
void *pkt);
static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt,
- uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma);
+ uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma);
static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt,
- dmaaddr_t physaddr, uint32 len, uint8 dma);
+ dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma);
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id,
- dmaaddr_t *physaddr, uint32 *len);
+ dmaaddr_t *physaddr, uint32 *len, void **secdma);
/* Packet metadata saved in packet id mapper */
typedef struct dhd_pktid_item {
uint16 len; /* length of mapped packet's buffer */
void *pkt; /* opaque native pointer to a packet */
dmaaddr_t physaddr; /* physical address of mapped packet's buffer */
+ void *secdma;
} dhd_pktid_item_t;
typedef struct dhd_pktid_map {
#define NATIVE_TO_PKTID_CLEAR(map) dhd_pktid_map_clear(map)
#define NATIVE_TO_PKTID_RSV(map, pkt) dhd_pktid_map_reserve((map), (pkt))
-#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma) \
- dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma)
-#define NATIVE_TO_PKTID(map, pkt, pa, len, dma) \
- dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma)
+#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma, secdma) \
+ dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma, \
+ (void *)(secdma))
+#define NATIVE_TO_PKTID(map, pkt, pa, len, dma, secdma) \
+ dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma, (void *)(secdma))
-#define PKTID_TO_NATIVE(map, pktid, pa, len) \
+#define PKTID_TO_NATIVE(map, pktid, pa, len, secdma) \
dhd_pktid_map_free((map), (uint32)(pktid), \
- (dmaaddr_t *)&(pa), (uint32 *)&(len))
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **) &secdma)
#define PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
+#define FLOWRING_NAME "h2dflr"
+#define RING_IS_FLOWRING(ring) \
+ ((strncmp(ring->name, FLOWRING_NAME, sizeof(FLOWRING_NAME))) == (0))
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
+
/*
* +---------------------------------------------------------------------------+
* Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
static INLINE void
dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
- dmaaddr_t physaddr, uint32 len, uint8 dma)
+ dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
locker->dma = dma; /* store contents in locker */
locker->physaddr = physaddr;
locker->len = (uint16)len; /* 16bit len */
+ locker->secdma = secdma;
}
+/*
+ * dhd_pktid_map_alloc -- reserve a pktid and save the packet's metadata.
+ *
+ * Reserves a unique 32-bit key for pkt via dhd_pktid_map_reserve() and, on
+ * success, stores physaddr/len/dma (and the secdma handle added by this
+ * patch for secure-DMA unmap later) in the locker via dhd_pktid_map_save().
+ * Returns the key, or DHD_PKTID_INVALID if the pktid pool is depleted.
+ */
static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
-	dmaaddr_t physaddr, uint32 len, uint8 dma)
+	dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma)
{
uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
if (nkey != DHD_PKTID_INVALID) {
-		dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma);
+		dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma, secdma);
}
return nkey;
}
*/
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
- dmaaddr_t *physaddr, uint32 *len)
+ dmaaddr_t *physaddr, uint32 *len, void **secdma)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
*physaddr = locker->physaddr; /* return contents of locker */
*len = (uint32)locker->len;
+ *secdma = locker->secdma;
return locker->pkt;
}
return BCME_NOMEM;
}
+#if defined(PCIE_D2H_SYNC)
+ dhd_prot_d2h_sync_init(dhd, prot);
+#endif /* PCIE_D2H_SYNC */
+
prot->dmaxfer.srcmem.va = NULL;
prot->dmaxfer.destmem.va = NULL;
prot->dmaxfer_in_progress = FALSE;
uint32 dma_block_size = 4 * length;
if (prot == NULL) {
- DHD_ERROR(("prot is not inited\n"));
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
- DHD_ERROR(("H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
- "inited for dma'ing h2d-w indices\n",
+ DHD_ERROR(("%s: H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
+ "inited for dma'ing h2d-w indices\n", __FUNCTION__,
prot->h2d_dma_writeindx_buf_len));
break;
ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
- DHD_ERROR(("H2D_READINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing h2d-r indices\n",
+ DHD_ERROR(("%s: H2D_READINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing h2d-r indices\n", __FUNCTION__,
prot->h2d_dma_readindx_buf_len));
break;
ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
- DHD_ERROR(("D2H_WRITEINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing d2h-w indices\n",
+ DHD_ERROR(("%s: D2H_WRITEINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing d2h-w indices\n", __FUNCTION__,
prot->d2h_dma_writeindx_buf_len));
break;
ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
- DHD_ERROR(("D2H_READINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing d2h-r indices\n",
+ DHD_ERROR(("%s: D2H_READINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing d2h-r indices\n", __FUNCTION__,
prot->d2h_dma_readindx_buf_len));
break;
/* Post event buffer after shim layer is attached */
ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+ if (ret <= 0) {
+ DHD_ERROR(("%s : Post event buffer fail. ret = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
/* Get the device rev info */
void *PKTBUF;
dmaaddr_t pa;
uint32 pa_len;
- PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+ void *secdma;
+ PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
if (PKTBUF) {
- DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+ {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0, secdma, 0);
+ } else
+ DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+ }
PKTFREE(dhd->osh, PKTBUF, FALSE);
}
return;
void *PKTBUF;
dmaaddr_t pa;
uint32 pa_len;
- PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+ void *secdma;
+ PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
if (PKTBUF) {
+ if (SECURE_DMA_ENAB(dhd->osh))
+ SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0, secdma, 0);
+ else
DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
}
cnt--;
if (cnt == 0) {
/* find a better way to reschedule rx buf post if space not available */
- DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
- DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
+ DHD_ERROR(("%s: h2d rx post ring not available to post host buffers\n", __FUNCTION__));
+ DHD_ERROR(("%s: Current posted host buf count %d \n", __FUNCTION__, prot->rxbufpost));
break;
}
/* Create a rx buffer */
if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
- return -1;
+ break;
}
pktlen = PKTLEN(dhd->osh, p);
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0,
+ ring->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+
if (PHYSADDRISZERO(physaddr)) {
- if (RING_WRITE_PTR(ring) < alloced - i)
- RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
- else
- RING_WRITE_PTR(ring) -= alloced - i;
- alloced = i;
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
+ ring->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+
PKTFREE(dhd->osh, p, FALSE);
- DHD_ERROR(("Invalid phyaddr 0\n"));
+ DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__));
ASSERT(0);
break;
}
rxbuf_post->cmn_hdr.request_id =
htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
- pktlen, DMA_RX));
+ pktlen, DMA_RX, ring->secdma));
/* free lock */
DHD_GENERAL_UNLOCK(dhd, flags);
if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
- if (RING_WRITE_PTR(ring) < alloced - i)
- RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
- else
- RING_WRITE_PTR(ring) -= alloced - i;
- alloced = i;
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
+ ring->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+
PKTFREE(dhd->osh, p, FALSE);
- DHD_ERROR(("Pktid pool depleted.\n"));
+ DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
break;
}
/* Move rxbuf_post_tmp to next item */
rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
}
+
+ if (i < alloced) {
+ if (RING_WRITE_PTR(ring) < (alloced - i))
+ RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - (alloced - i);
+ else
+ RING_WRITE_PTR(ring) -= (alloced - i);
+
+ alloced = i;
+ }
+
/* Update the write pointer in TCM & ring bell */
if (alloced > 0)
prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced);
uint16 alloced = 0;
unsigned long flags;
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return -1;
+ }
+
if (event_buf) {
/* Allocate packet for event buffer post */
pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
}
if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
- DHD_ERROR(("%s:%d: PKTGET for ctrl rxbuf failed\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("%s:%d: PKTGET for %s rxbuf failed\n",
+ __FUNCTION__, __LINE__, event_buf ?
+ "event" : "ioctl"));
return -1;
}
pktlen = PKTLEN(dhd->osh, p);
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+ DMA_RX, p, 0, prot->h2dring_ctrl_subn->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+
if (PHYSADDRISZERO(physaddr)) {
- DHD_ERROR(("Invalid phyaddr 0\n"));
+ DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__));
ASSERT(0);
goto free_pkt_return;
}
prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
if (rxbuf_post == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
- __FUNCTION__, __LINE__));
+ DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer"
+ " for %s\n", __FUNCTION__, __LINE__, event_buf ? "event" :
+ "ioctl"));
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
+ prot->h2dring_ctrl_subn->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+
goto free_pkt_return;
}
rxbuf_post->cmn_hdr.if_id = 0;
rxbuf_post->cmn_hdr.request_id =
- htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX));
+ htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX,
+ prot->h2dring_ctrl_subn->secdma));
if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
else
RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
DHD_GENERAL_UNLOCK(dhd, flags);
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
+ prot->h2dring_ctrl_subn->secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else
DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+
goto free_pkt_return;
}
uint32 i = 0;
int32 ret_val;
- DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
+ DHD_INFO(("%s: max to post %d, event %d\n", __FUNCTION__, max_to_post, event_buf));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return 0;
+ }
+
while (i < max_to_post) {
ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
if (ret_val < 0)
break;
i++;
}
- DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
+ DHD_INFO(("%s: posted %d buffers to event_pool/ioctl_resp_pool %d\n", __FUNCTION__, i, event_buf));
return (uint16)i;
}
dhd_prot_t *prot = dhd->prot;
uint16 retcnt = 0;
- DHD_INFO(("ioctl resp buf post\n"));
+ DHD_INFO(("%s: ioctl resp buf post\n", __FUNCTION__));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return 0;
+ }
+
retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
prot->cur_ioctlresp_bufs_posted += retcnt;
- return 0;
+ return retcnt;
}
static int
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
- prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
+ uint16 retcnt = 0;
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return 0;
+ }
+
+ retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
prot->max_eventbufpost - prot->cur_event_bufs_posted);
- return 0;
+
+ prot->cur_event_bufs_posted += retcnt;
+ return retcnt;
}
-int BCMFASTPATH
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd)
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
dhd_prot_t *prot = dhd->prot;
+ bool more = TRUE;
+ uint n = 0;
/* Process all the messages - DTOH direction */
while (TRUE) {
/* Get the message from ring */
src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
- if (src_addr == NULL)
+ if (src_addr == NULL) {
+ more = FALSE;
break;
+ }
/* Prefetch data to populate the cache */
OSL_PREFETCH(src_addr);
DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
__FUNCTION__, src_len));
}
+
+ /* After batch processing, check RX bound */
+ n += src_len/RING_LEN_ITEMS(prot->d2hring_rx_cpln);
+ if (n >= bound) {
+ break;
+ }
}
- return 0;
+ return more;
}
void
ring->ringstate->r_offset = r_index;
}
- DHD_TRACE(("flow %d, write %d read %d \n\n", flow_id, RING_WRITE_PTR(ring),
+ DHD_TRACE(("%s: flow %d, write %d read %d \n\n", __FUNCTION__, flow_id, RING_WRITE_PTR(ring),
RING_READ_PTR(ring)));
/* Need more logic here, but for now use it directly */
}
-int BCMFASTPATH
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd)
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
dhd_prot_t *prot = dhd->prot;
+ bool more = TRUE;
+ uint n = 0;
/* Process all the messages - DTOH direction */
while (TRUE) {
uint16 src_len;
src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
- if (src_addr == NULL)
+ if (src_addr == NULL) {
+ more = FALSE;
break;
+ }
/* Prefetch data to populate the cache */
OSL_PREFETCH(src_addr);
/* Write to dngl rd ptr */
prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);
+
+ /* After batch processing, check bound */
+ n += src_len/RING_LEN_ITEMS(prot->d2hring_tx_cpln);
+ if (n >= bound) {
+ break;
+ }
}
- return 0;
+ return more;
}
int BCMFASTPATH
return ret;
}
-#define PCIE_M2M_D2H_DMA_WAIT_TRIES 256
-#define PCIE_D2H_RESET_MARK 0xdeadbeef
-void dhd_msgbuf_d2h_check_cmplt(msgbuf_ring_t *ring, void *msg)
-{
- uint32 tries;
- uint32 *marker = (uint32 *)msg + RING_LEN_ITEMS(ring) / sizeof(uint32) - 1;
-
- for (tries = 0; tries < PCIE_M2M_D2H_DMA_WAIT_TRIES; tries++) {
- if (*(volatile uint32 *)marker != PCIE_D2H_RESET_MARK)
- return;
- OSL_CACHE_INV(msg, RING_LEN_ITEMS(ring));
- }
-
- /* only print error for data ring */
- if (ring->idx == BCMPCIE_D2H_MSGRING_TX_COMPLETE ||
- ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
- DHD_ERROR(("%s: stale msgbuf content after %d retries\n",
- __FUNCTION__, tries));
-}
-
static int BCMFASTPATH
dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
{
uint8 msgtype;
cmn_msg_hdr_t *msg = NULL;
int ret = BCME_OK;
+
+#if defined(PCIE_D2H_SYNC_BZERO)
uint8 *buf_head = buf;
+#endif /* PCIE_D2H_SYNC_BZERO */
ASSERT(ring && ring->ringmem);
msglen = RING_LEN_ITEMS(ring);
while (pktlen > 0) {
msg = (cmn_msg_hdr_t *)buf;
- dhd_msgbuf_d2h_check_cmplt(ring, msg);
-
+#if defined(PCIE_D2H_SYNC)
+ /* Wait until DMA completes, then fetch msgtype */
+ msgtype = dhd->prot->d2h_sync_cb(dhd, ring, msg, msglen);
+#else
msgtype = msg->msg_type;
+#endif /* !PCIE_D2H_SYNC */
- /* Prefetch data to populate the cache */
- OSL_PREFETCH(buf + msglen);
-
- DHD_INFO(("msgtype %d, msglen is %d, pktlen is %d \n",
+ DHD_INFO(("%s: msgtype %d, msglen is %d, pktlen is %d\n", __FUNCTION__,
msgtype, msglen, pktlen));
if (msgtype == MSG_TYPE_LOOPBACK) {
bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
- DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", msglen));
+ DHD_ERROR(("%s: MSG_TYPE_LOOPBACK, len %d\n", __FUNCTION__, msglen));
+ }
+
+
+ if (msgtype >= DHD_PROT_FUNCS) {
+ DHD_ERROR(("%s: msgtype %d, msglen is %d, pktlen is %d \n",
+ __FUNCTION__, msgtype, msglen, pktlen));
+ ret = BCME_ERROR;
+ goto done;
}
- ASSERT(msgtype < DHD_PROT_FUNCS);
if (table_lookup[msgtype]) {
table_lookup[msgtype](dhd, buf, msglen);
}
pktlen = pktlen - msglen;
buf = buf + msglen;
- if (msgtype == MSG_TYPE_RX_CMPLT)
- prot_early_upd_rxcpln_read_idx(dhd,
- dhd->prot->d2hring_rx_cpln);
+ if (ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+ prot_early_upd_rxcpln_read_idx(dhd, ring);
}
done:
- OSL_CACHE_FLUSH(buf_head, len - pktlen);
+
+#if defined(PCIE_D2H_SYNC_BZERO)
+ OSL_CACHE_FLUSH(buf_head, len - pktlen); /* Flush the bzeroed msg */
+#endif /* PCIE_D2H_SYNC_BZERO */
#ifdef DHD_RX_CHAINING
dhd_rxchain_commit(dhd);
return ret;
}
+/*
+ * dhd_prot_noop -- message handler that does nothing.
+ *
+ * Placeholder entry for the msgtype dispatch table so that message types
+ * which require no host-side processing can still be dispatched safely.
+ * buf and msglen are ignored.
+ */
+static void
+dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ return;
+}
+
static void
dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
- DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
+ DHD_ERROR(("%s: ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
+ __FUNCTION__,
ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
/* How do we track this to pair it with ??? */
dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
- DHD_ERROR(("gen status: request_id %d, status 0x%04x, flow ring %d \n",
+ DHD_ERROR(("%s: gen status: request_id %d, status 0x%04x, flow ring %d \n",
+ __FUNCTION__,
gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
gen_status->compl_hdr.flow_ring_id));
{
ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;
- DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+ DHD_CTL(("%s: ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+ __FUNCTION__,
ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
ioct_ack->compl_hdr.flow_ring_id));
if (ioct_ack->compl_hdr.status != 0) {
- DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+ DHD_ERROR(("%s: got an error status for the ioctl request...need to handle that\n",
+ __FUNCTION__));
}
+#if defined(PCIE_D2H_SYNC_BZERO)
memset(buf, 0, msglen);
- ioct_ack->marker = PCIE_D2H_RESET_MARK;
+#endif /* PCIE_D2H_SYNC_BZERO */
}
+
static void
dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
status = ioct_resp->compl_hdr.status;
+#if defined(PCIE_D2H_SYNC_BZERO)
memset(buf, 0, msglen);
- ioct_resp->marker = PCIE_D2H_RESET_MARK;
+#endif /* PCIE_D2H_SYNC_BZERO */
- DHD_CTL(("IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n",
+ DHD_CTL(("%s: IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n", __FUNCTION__,
pkt_id, xt_id, status, resp_len));
dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len);
unsigned long flags;
uint32 pktid;
void *pkt;
-
+ ulong pa;
+ uint32 pa_len;
+ void *secdma;
/* locks required to protect circular buffer accesses */
DHD_GENERAL_LOCK(dhd, flags);
txstatus = (host_txbuf_cmpl_t *)buf;
pktid = ltoh32(txstatus->cmn_hdr.request_id);
- DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+ DHD_INFO(("%s: txstatus for pktid 0x%04x\n", __FUNCTION__, pktid));
if (prot->active_tx_count)
prot->active_tx_count--;
else
- DHD_ERROR(("Extra packets are freed\n"));
+ DHD_ERROR(("%s: Extra packets are freed\n", __FUNCTION__));
ASSERT(pktid != 0);
- pkt = dhd_prot_packet_get(dhd, pktid);
+ pkt = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
if (pkt) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ int offset = 0;
+ BCM_REFERENCE(offset);
+
+ if (dhd->prot->tx_metadata_offset)
+ offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
+ SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
+ (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, 0,
+ secdma, offset);
+ } else
+ DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, dmah);
+
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
#endif
PKTFREE(dhd->osh, pkt, TRUE);
}
+#if defined(PCIE_D2H_SYNC_BZERO)
memset(buf, 0, msglen);
- txstatus->marker = PCIE_D2H_RESET_MARK;
+#endif /* PCIE_D2H_SYNC_BZERO */
DHD_GENERAL_UNLOCK(dhd, flags);
void* pkt;
unsigned long flags;
dhd_prot_t *prot = dhd->prot;
+ int post_cnt = 0;
+ bool zero_posted = FALSE;
/* Event complete header */
evnt = (wlevent_req_msg_t *)buf;
/* Post another rxbuf to the device */
if (prot->cur_event_bufs_posted)
prot->cur_event_bufs_posted--;
- dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+ else
+ zero_posted = TRUE;
+
+ post_cnt = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+ if (zero_posted && (post_cnt <= 0)) {
+ return;
+ }
+
+#if defined(PCIE_D2H_SYNC_BZERO)
memset(buf, 0, len);
- evnt->marker = PCIE_D2H_RESET_MARK;
+#endif /* PCIE_D2H_SYNC_BZERO */
/* locks required to protect pktid_map */
DHD_GENERAL_LOCK(dhd, flags);
return;
}
- DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
+ DHD_INFO(("%s: id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
+ __FUNCTION__,
ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
ltoh16(rxcmplt_h->metadata_len)));
current_phase = rxcmplt_h->cmn_hdr.flags;
}
if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)
- DHD_INFO(("D11 frame rxed \n"));
+ DHD_INFO(("%s: D11 frame rxed\n", __FUNCTION__));
/* data_offset from buf start */
if (data_offset) {
/* data offset given from dongle after split rx */
PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
ifidx = rxcmplt_h->cmn_hdr.if_id;
+
+#if defined(PCIE_D2H_SYNC_BZERO)
memset(buf, 0, msglen);
- rxcmplt_h->marker = PCIE_D2H_RESET_MARK;
+#endif /* PCIE_D2H_SYNC_BZERO */
#ifdef DHD_RX_CHAINING
/* Chain the packets */
host_txbuf_post_t *txdesc = NULL;
dmaaddr_t physaddr, meta_physaddr;
uint8 *pktdata;
- uint16 pktlen;
+ uint32 pktlen;
uint32 pktid;
uint8 prio;
uint16 flowid = 0;
uint16 headroom;
msgbuf_ring_t *msg_ring;
+ uint8 dhcp_pkt;
+
+ if (!dhd->flow_ring_table)
+ return BCME_NORESOURCE;
if (!dhd_bus_is_txmode_push(dhd->bus)) {
flow_ring_table_t *flow_ring_table;
/* Create a unique 32-bit packet id */
pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF);
if (pktid == DHD_PKTID_INVALID) {
- DHD_ERROR(("Pktid pool depleted.\n"));
+ DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
/*
* If we return error here, the caller would queue the packet
* again. So we'll just free the skb allocated in DMA Zone.
txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd,
msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
if (txdesc == NULL) {
+ void *secdma;
DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
__FUNCTION__, __LINE__, prot->active_tx_count));
/* Free up the PKTID */
PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr,
- pktlen);
+ pktlen, secdma);
goto err_no_res_pktfree;
}
+ /* test if dhcp pkt */
+ dhcp_pkt = pkt_is_dhcp(dhd->osh, PKTBUF);
+ txdesc->flag2 = (txdesc->flag2 & ~(BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK <<
+ BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT)) | ((dhcp_pkt &
+ BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK) << BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT);
+
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
- pktlen = (uint16)PKTLEN(dhd->osh, PKTBUF);
+ pktlen = PKTLEN(dhd->osh, PKTBUF);
/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
pktlen -= ETHER_HDR_LEN;
/* Map the data pointer to a DMA-able address */
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+
+ int offset = 0;
+ BCM_REFERENCE(offset);
+
+ if (prot->tx_metadata_offset)
+ offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
+
+ physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
+ DMA_TX, PKTBUF, 0, msg_ring->secdma, offset);
+ } else
physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+
if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) {
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__));
ASSERT(0);
}
/* No need to lock. Save the rest of the packet's metadata */
NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid,
- physaddr, pktlen, DMA_TX);
+ physaddr, pktlen, DMA_TX, msg_ring->secdma);
#ifdef TXP_FLUSH_NITEMS
if (msg_ring->pend_items_count == 0)
txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
txdesc->seg_cnt = 1;
- txdesc->data_len = htol16(pktlen);
+ txdesc->data_len = htol16((uint16)pktlen);
txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
/* Handle Tx metadata */
headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
- DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+ DHD_ERROR(("%s: No headroom for Metadata tx %d %d\n", __FUNCTION__,
prot->tx_metadata_offset, headroom));
if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
- DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+ DHD_TRACE(("%s: Metadata in tx %d\n", __FUNCTION__, prot->tx_metadata_offset));
/* Adjust the data pointer to account for meta data in DMA_MAP */
PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ meta_physaddr = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+ prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
+ 0, msg_ring->secdma);
+ } else
meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+
if (PHYSADDRISZERO(meta_physaddr)) {
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__));
ASSERT(0);
}
}
- DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+ DHD_TRACE(("%s: txpost: data_len %d, pktid 0x%04x\n", __FUNCTION__, txdesc->data_len,
txdesc->cmn_hdr.request_id));
/* Update the write pointer in TCM & ring bell */
flow_ring_node_t *flow_ring_node;
msgbuf_ring_t *msg_ring;
+ if (!dhd->flow_ring_table)
+ return;
if (!in_lock) {
DHD_GENERAL_LOCK(dhd, flags);
goto done;
if (prot->pending == TRUE) {
- DHD_ERROR(("packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ DHD_ERROR(("%s: packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ __FUNCTION__,
ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
(unsigned long)prot->lastcmd));
if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
prot->dmaxfer.destmem.va, prot->dmaxfer.len);
}
else {
- DHD_INFO(("DMA successful\n"));
+ DHD_INFO(("%s: DMA successful\n", __FUNCTION__));
}
}
dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
uint16 alloced = 0;
if (prot->dmaxfer_in_progress) {
- DHD_ERROR(("DMA is in progress...\n"));
+ DHD_ERROR(("%s: DMA is in progress...\n", __FUNCTION__));
return ret;
}
prot->dmaxfer_in_progress = TRUE;
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("DMA Started...\n"));
+ DHD_ERROR(("%s: DMA Started...\n", __FUNCTION__));
return BCME_OK;
}
}
ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+ if (ret < 0) {
+ DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret));
+ return ret;
+ }
- DHD_INFO(("ACTION %d ifdix %d cmd %d len %d \n",
+ DHD_INFO(("%s: ACTION %d ifdix %d cmd %d len %d \n", __FUNCTION__,
action, ifidx, cmd, len));
/* wait for interrupt and get first fragment */
void* pkt;
int retlen;
int msgbuf_len = 0;
+ int post_cnt = 0;
unsigned long flags;
+ bool zero_posted = FALSE;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return -1;
+ }
if (prot->cur_ioctlresp_bufs_posted)
prot->cur_ioctlresp_bufs_posted--;
+ else
+ zero_posted = TRUE;
+
+ post_cnt = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ if (zero_posted && (post_cnt <= 0)) {
+ return -1;
+ }
- dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ memset(&ioct_resp, 0, sizeof(ioctl_comp_resp_msg_t));
retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
if (retlen <= 0) {
- DHD_ERROR(("IOCTL request failed with error code %d\n", retlen));
+ DHD_ERROR(("%s: IOCTL request failed with error code %d\n", __FUNCTION__, retlen));
return retlen;
}
- DHD_INFO(("ioctl resp retlen %d status %d, resp_len %d, pktid %d\n",
+ DHD_INFO(("%s: ioctl resp retlen %d status %d, resp_len %d, pktid %d\n", __FUNCTION__,
retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
ioct_resp.cmn_hdr.request_id));
if (ioct_resp.resp_len != 0) {
pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id);
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_INFO(("ioctl ret buf %p retlen %d status %x \n", pkt, retlen,
+ DHD_INFO(("%s: ioctl ret buf %p retlen %d status %x\n", __FUNCTION__, pkt, retlen,
ioct_resp.compl_hdr.status));
/* get ret buf */
if ((buf) && (pkt)) {
/* Fill up msgbuf for ioctl req */
ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+ if (ret < 0) {
+ DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret));
+ return ret;
+ }
- DHD_INFO(("ACTIOn %d ifdix %d cmd %d len %d \n",
+ DHD_INFO(("%s: ACTIOn %d ifdix %d cmd %d len %d \n", __FUNCTION__,
action, ifidx, cmd, len));
ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
/* Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
-
+#if defined(PCIE_D2H_SYNC)
+ if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
+ bcm_bprintf(strbuf, "\nd2h_sync: SEQNUM:");
+ else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
+ bcm_bprintf(strbuf, "\nd2h_sync: XORCSUM:");
+ else
+ bcm_bprintf(strbuf, "\nd2h_sync: NONE:");
+ bcm_bprintf(strbuf, " d2h_sync_wait max<%lu> tot<%lu>\n",
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
+#endif /* PCIE_D2H_SYNC */
}
/* Update local copy of dongle statistics */
ret_buf = prot_get_ring_space(ring, nitems, alloced);
if (ret_buf == NULL) {
- DHD_INFO(("%s: Ring space not available \n", ring->name));
+ DHD_INFO(("%s: RING space not available on ring %s for %d items \n", __FUNCTION__,
+ ring->name, nitems));
+ DHD_INFO(("%s: write %d read %d \n\n", __FUNCTION__, RING_WRITE_PTR(ring),
+ RING_READ_PTR(ring)));
return NULL;
}
}
ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
if (ioct_rqst == NULL) {
- DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+ DHD_ERROR(("%s: couldn't allocate space on msgring to send ioctl request\n", __FUNCTION__));
DHD_GENERAL_UNLOCK(dhd, flags);
return -1;
}
OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
if ((ulong)ioct_buf % DMA_ALIGN_LEN)
- DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+ DHD_ERROR(("%s: host ioct address unaligned !!!!! \n", __FUNCTION__));
- DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+ DHD_CTL(("%s: submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+ __FUNCTION__,
ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
ioct_rqst->trans_id));
uint alloced = 0;
msgbuf_ring_t *ring;
dmaaddr_t physaddr;
- uint16 size, cnt;
- uint32 *marker;
+ uint16 size;
ASSERT(name);
BCM_REFERENCE(physaddr);
size = max_item * len_item;
/* Ring Memmory allocation */
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
+ if (RING_IS_FLOWRING(ring)) {
+ ring->ring_base.va = DMA_ALLOC_CONSISTENT_STATIC(prot->osh,
+ size, DMA_ALIGN_LEN, &alloced, &ring->ring_base.pa,
+ &ring->ring_base.dmah, ringid);
+ } else
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
&alloced, &ring->ring_base.pa, &ring->ring_base.dmah);
ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
bzero(ring->ring_base.va, size);
- for (cnt = 0; cnt < max_item; cnt++) {
- marker = (uint32 *)ring->ring_base.va +
- (cnt + 1) * len_item / sizeof(uint32) - 1;
- *marker = PCIE_D2H_RESET_MARK;
- }
+
OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
/* Ring state init */
goto fail;
bzero(ring->ringstate, sizeof(*ring->ringstate));
- DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
- "ring start %p buf phys addr %x:%x \n",
+#ifdef BCM_SECURE_DMA
+	if (SECURE_DMA_ENAB(prot->osh)) {
+		ring->secdma = MALLOC(prot->osh, sizeof(sec_cma_info_t));
+		/* Check the allocation before touching the buffer: zeroing
+		 * first dereferences a NULL pointer when MALLOC fails.
+		 */
+		if (ring->secdma == NULL) {
+			DHD_ERROR(("%s: MALLOC failure for secdma\n", __FUNCTION__));
+			goto fail;
+		}
+		bzero(ring->secdma, sizeof(sec_cma_info_t));
+	}
+#endif
+ DHD_INFO(("%s: RING_ATTACH : %s Max item %d len item %d total size %d "
+ "ring start %p buf phys addr %x:%x \n", __FUNCTION__,
ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
ring->ringmem->base_addr.low_addr));
return ring;
fail:
- if (ring->ring_base.va)
+ if (ring->ring_base.va && ring->ringmem) {
PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
size = ring->ringmem->max_item * ring->ringmem->len_items;
DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
ring->ring_base.va = NULL;
+ }
if (ring->ringmem)
MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
size = ring->ringmem->max_item * ring->ringmem->len_items;
/* Free up ring */
if (ring->ring_base.va) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
+ if (RING_IS_FLOWRING(ring)) {
+ DMA_FREE_CONSISTENT_STATIC(prot->osh, ring->ring_base.va, size,
+ ring->ring_base.pa, ring->ring_base.dmah, ring->idx);
+ } else
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa,
ring->ring_base.dmah);
ring->ring_base.va = NULL;
MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t));
ring->ringstate = NULL;
}
+#ifdef BCM_SECURE_DMA
+ if (SECURE_DMA_ENAB(prot->osh)) {
+ DHD_ERROR(("%s:free secdma\n", __FUNCTION__));
+ SECURE_DMA_UNMAP_ALL(prot->osh, ring->secdma);
+ MFREE(prot->osh, ring->secdma, sizeof(sec_cma_info_t));
+ }
+#endif
/* free up ring info */
MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
RING_MAX_ITEM(ring));
if (ring_avail_cnt == 0) {
- DHD_INFO(("RING space not available on ring %s for %d items \n",
- ring->name, nitems));
- DHD_INFO(("write %d read %d \n\n", RING_WRITE_PTR(ring),
- RING_READ_PTR(ring)));
return NULL;
}
*alloced = MIN(nitems, ring_avail_cnt);
if (*available_len == 0)
return NULL;
- ASSERT(*available_len <= ring->ringmem->max_item);
+ if (*available_len > ring->ringmem->max_item) {
+ DHD_ERROR(("%s: *available_len %d, ring->ringmem->max_item %d\n",
+ __FUNCTION__, *available_len, ring->ringmem->max_item));
+ return NULL;
+ }
/* if space available, calculate address to be read */
ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);
/* convert index to bytes */
*available_len = *available_len * ring->ringmem->len_items;
+ /* Cache invalidate */
+ OSL_CACHE_INV((void *) ret_addr, *available_len);
+
/* return read address */
return ret_addr;
}
dhd_prot_t *prot = dhd->prot;
uint16 msglen = sizeof(tx_flowring_delete_request_t);
unsigned long flags;
+ char eabuf[ETHER_ADDR_STR_LEN];
uint16 alloced = 0;
/* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
flow_delete_rqst->reason = htol16(BCME_OK);
- DHD_ERROR(("%s sending FLOW RING Delete req msglen %d \n", __FUNCTION__, msglen));
+ bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
+ DHD_ERROR(("%s sending FLOW RING ID %d for peer %s prio %d ifindex %d"
+ " Delete req msglen %d\n", __FUNCTION__,
+ flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
+ flow_ring_node->flow_info.ifindex, msglen));
/* upd wrt ptr and raise interrupt */
prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst,
DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__,
flow_delete_resp->cmplt.status));
+#ifdef PCIE_TX_DEFERRAL
+ if (flow_delete_resp->cmplt.status != BCME_OK) {
+ DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+ __FUNCTION__, flow_delete_resp->cmplt.status));
+ return;
+ }
+ set_bit(flow_delete_resp->cmplt.flow_ring_id, dhd->bus->delete_flow_map);
+ queue_work(dhd->bus->tx_wq, &dhd->bus->delete_flow_work);
+#else
dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
flow_delete_resp->cmplt.status);
+#endif /* PCIE_TX_DEFERRAL */
}
int
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pcie.c 491657 2014-07-17 06:29:40Z $
+ * $Id: dhd_pcie.c 506043 2014-10-02 12:29:45Z $
*/
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
+#include <dhd_config.h>
#ifdef BCMEMBEDIMAGE
#include BCMEMBEDIMAGE
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
+#if defined(SUPPORT_MULTIPLE_BOARD_REV)
+ extern unsigned int system_rev;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV */
+
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
#ifdef DHD_DEBUG
static int _dhdpcie_download_firmware(struct dhd_bus *bus);
static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
-static void dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
-static void dhdpci_bus_read_frames(dhd_bus_t *bus);
+static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
static int dhdpcie_readshared(dhd_bus_t *bus);
static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+#ifdef CONFIG_ARCH_MSM8994
+static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data);
+static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset);
+#endif
static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
IOV_DUMP_RINGUPD_BLOCK,
IOV_DMA_RINGINDICES,
IOV_DB1_FOR_MB,
- IOV_FLOW_PRIO_MAP
+ IOV_FLOW_PRIO_MAP,
+ IOV_RXBOUND,
+ IOV_TXBOUND
};
{"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 },
{"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 },
{"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
+ {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
{NULL, 0, 0, 0, 0 }
};
#define MAX_READ_TIMEOUT 5 * 1000 * 1000
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND 64
+#endif
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND 64
+#endif
+uint dhd_rxbound = DHD_RXBOUND;
+uint dhd_txbound = DHD_TXBOUND;
+
/* Register/Unregister functions are called by the main DHD entry
* point (e.g. module insertion) to link with the bus driver, in
* order to look for or await the device.
*
* 'tcm' is the *host* virtual address at which tcm is mapped.
*/
-dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm)
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm, uint32 tcm_size)
{
dhd_bus_t *bus;
bzero(bus, sizeof(dhd_bus_t));
bus->regs = regs;
bus->tcm = tcm;
+ bus->tcm_size = tcm_size;
bus->osh = osh;
dll_init(&bus->const_flowring);
/* Attach pcie shared structure */
bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
+ if (!bus->pcie_sh) {
+ DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
+ break;
+ }
/* dhd_common_init(osh); */
-
if (dhdpcie_dongle_attach(bus)) {
DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
break;
DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+ if (bus && bus->pcie_sh)
+ MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+
+ if (bus)
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+
return NULL;
}
}
if (bus->dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s : bus is down. we have nothing to do\n",
+ DHD_TRACE(("%s : bus is down. we have nothing to do\n",
__FUNCTION__));
break;
}
DHD_TRACE(("%s: ENTER\n",
__FUNCTION__));
+
bus->alp_only = TRUE;
bus->sih = NULL;
/* Set bar0 window to si_enum_base */
dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+#ifdef CONFIG_ARCH_MSM8994
+ /* Read bar1 window */
+ bus->bar1_win_base = OSL_PCI_READ_CONFIG(bus->osh, PCI_BAR1_WIN, 4);
+ DHD_ERROR(("%s: PCI_BAR1_WIN = %x\n", __FUNCTION__, bus->bar1_win_base));
+#endif
+
/* si_attach() will provide an SI handle and scan the backplane */
if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
&bus->vars, &bus->varsz))) {
/* WAR where the BAR1 window may not be sized properly */
W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
val = R_REG(osh, &sbpcieregs->configdata);
+#ifdef CONFIG_ARCH_MSM8994
+ bus->bar1_win_mask = 0xffffffff - (bus->tcm_size - 1);
+ DHD_ERROR(("%s: BAR1 window val=%d mask=%x\n", __FUNCTION__, val, bus->bar1_win_mask));
+#endif
W_REG(osh, &sbpcieregs->configdata, val);
/* Get info on the ARM and SOCRAM cores... */
bus->intr = (bool)dhd_intr;
bus->wait_for_d3_ack = 1;
+ bus->suspended = FALSE;
DHD_TRACE(("%s: EXIT: SUCCESS\n",
__FUNCTION__));
return 0;
void
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
- DHD_TRACE(("enable interrupts\n"));
+ DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__));
if (!bus || !bus->sih)
return;
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
+/* Quiesce the bus ahead of device removal: under the sdlock, mark the bus
+ * DOWN, mask PCIe interrupts, and (when the SI handle is still valid) reset
+ * the dongle through the PCIe watchdog.
+ */
+void
+dhdpcie_bus_remove_prep(dhd_bus_t *bus)
+{
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	dhd_os_sdlock(bus->dhd);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	dhdpcie_bus_intr_disable(bus);
+	// terence 20150406: fix for null pointer handle
+	if (bus->sih)
+		pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+
+	dhd_os_sdunlock(bus->dhd);
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
/* Detach and free everything */
void
if (bus->dhd) {
dongle_isolation = bus->dhd->dongle_isolation;
- dhd_detach(bus->dhd);
-
if (bus->intr) {
dhdpcie_bus_intr_disable(bus);
dhdpcie_free_irq(bus);
}
+ dhd_detach(bus->dhd);
dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
dhd_free(bus->dhd);
bus->dhd = NULL;
bus->regs = NULL;
}
if (bus->tcm) {
- dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
+ dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, bus->tcm_size);
bus->tcm = NULL;
}
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
- DHD_TRACE(("%s Enter\n", __FUNCTION__));
-
DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
bus->dhd, bus->dhd->dongle_reset));
OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
}
si_detach(bus->sih);
+ // terence 20150420: fix for sih incorrectly handled in other function
+ bus->sih = NULL;
if (bus->vars && bus->varsz)
MFREE(osh, bus->vars, bus->varsz);
bus->vars = NULL;
OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
}
+#ifdef CONFIG_ARCH_MSM8994
+/* Program the PCI BAR1 window base register so that subsequent TCM
+ * accesses through BAR1 map the region containing the desired offset.
+ */
+void
+dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, data);
+}
+#endif
+
void
dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
{
/* Download firmware image and nvram image */
int
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
- char *pfw_path, char *pnv_path)
+ char *pfw_path, char *pnv_path, char *pconf_path)
{
int ret;
bus->fw_path = pfw_path;
bus->nv_path = pnv_path;
+ bus->dhd->conf_path = pconf_path;
ret = dhdpcie_download_firmware(bus, osh);
DHD_OS_WAKE_LOCK(bus->dhd);
+ /* External conf takes precedence if specified */
+ dhd_conf_preinit(bus->dhd);
+ dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+
+ printk("Final fw_path=%s\n", bus->fw_path);
+ printk("Final nv_path=%s\n", bus->nv_path);
+ printk("Final conf_path=%s\n", bus->dhd->conf_path);
+
ret = _dhdpcie_download_firmware(bus);
DHD_OS_WAKE_UNLOCK(bus->dhd);
* entry or in module param.
*/
image = dhd_os_open_image(pfw_path);
- if (image == NULL)
+ if (image == NULL) {
+ printk("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
goto err;
+ }
memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
if (memblock == NULL) {
if (nvram_file_exists) {
image = dhd_os_open_image(pnv_path);
- if (image == NULL)
+ if (image == NULL) {
+ printk("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
goto err;
+ }
}
memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
/* Wait until control frame is available */
timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
- if (timeleft == 0) {
- DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
- bus->ioct_resp.cmn_hdr.request_id = 0;
- bus->ioct_resp.compl_hdr.status = 0xffff;
- bus->rxlen = 0;
- }
rxlen = bus->rxlen;
- bcopy(&bus->ioct_resp, msg, sizeof(ioctl_comp_resp_msg_t));
+ bcopy(&bus->ioct_resp, msg, MIN(rxlen, sizeof(ioctl_comp_resp_msg_t)));
bus->rxlen = 0;
if (rxlen) {
DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
} else if (timeleft == 0) {
DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+ bus->ioct_resp.cmn_hdr.request_id = 0;
+ bus->ioct_resp.compl_hdr.status = 0xffff;
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
} else if (pending == TRUE) {
DHD_CTL(("%s: canceled\n", __FUNCTION__));
return -ERESTARTSYS;
} else {
DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
}
- if (timeleft == 0) {
- bus->dhd->rxcnt_timeout++;
- DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
- }
- else
+
+ if (timeleft != 0)
bus->dhd->rxcnt_timeout = 0;
if (rxlen)
uint dsize;
int detect_endian_flag = 0x01;
bool little_endian;
+#ifdef CONFIG_ARCH_MSM8994
+ bool is_64bit_unaligned;
+#endif
/* Detect endianness. */
little_endian = *(char *)&detect_endian_flag;
+#ifdef CONFIG_ARCH_MSM8994
+ /* Check 64bit aligned or not. */
+ is_64bit_unaligned = (address & 0x7);
+#endif
/* In remap mode, adjust address beyond socram and redirect
* to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
* is not backplane accessible
/* Do the transfer(s) */
if (write) {
while (size) {
- if (size >= sizeof(uint64) && little_endian)
+ if (size >= sizeof(uint64) && little_endian) {
+#ifdef CONFIG_ARCH_MSM8994
+ if (is_64bit_unaligned) {
+ DHD_INFO(("%s: write unaligned %lx\n",
+ __FUNCTION__, address));
+ dhdpcie_bus_wtcm32(bus, address, *((uint32 *)data));
+ data += 4;
+ size -= 4;
+ address += 4;
+ is_64bit_unaligned = (address & 0x7);
+ continue;
+ }
+ else
+#endif
dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
- else {
+ } else {
dsize = sizeof(uint8);
dhdpcie_bus_wtcm8(bus, address, *data);
}
}
} else {
while (size) {
- if (size >= sizeof(uint64) && little_endian)
+ if (size >= sizeof(uint64) && little_endian) {
+#ifdef CONFIG_ARCH_MSM8994
+ if (is_64bit_unaligned) {
+ DHD_INFO(("%s: read unaligned %lx\n",
+ __FUNCTION__, address));
+ *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
+ data += 4;
+ size -= 4;
+ address += 4;
+ is_64bit_unaligned = (address & 0x7);
+ continue;
+ }
+ else
+#endif
*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
- else {
+ } else {
dsize = sizeof(uint8);
*data = dhdpcie_bus_rtcm8(bus, address);
}
queue = &flow_ring_node->queue; /* queue associated with flow ring */
- DHD_QUEUE_LOCK(queue->lock, flags);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ return BCME_NOTREADY;
+ }
while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTORPHAN(txp);
#ifdef DHDTCPACK_SUPPRESS
- dhd_tcpack_check_xmit(bus->dhd, txp);
+ if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ dhd_tcpack_check_xmit(bus->dhd, txp);
+ }
#endif /* DHDTCPACK_SUPPRESS */
/* Attempt to transfer packet over flow ring */
dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
/* reinsert at head */
dhd_flow_queue_reinsert(bus->dhd, queue, txp);
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* If we are able to requeue back, return success */
return BCME_OK;
dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
return ret;
}
+#ifndef PCIE_TX_DEFERRAL
/* Send a data frame to the dongle. Callee disposes of txp. */
int BCMFASTPATH
dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
queue = &flow_ring_node->queue; /* queue associated with flow ring */
- DHD_QUEUE_LOCK(queue->lock, flags);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
txp_pend = txp;
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
if (flow_ring_node->status) {
DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
/* If we have anything pending, try to push into q */
if (txp_pend) {
- DHD_QUEUE_LOCK(queue->lock, flags);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
txp = txp_pend;
goto toss;
}
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
return ret;
PKTCFREE(bus->dhd->osh, txp, TRUE);
return ret;
}
+#else /* PCIE_TX_DEFERRAL */
+/* Send a data frame to the dongle; flow ring creation is deferred to the
+ * bus tx workqueue.  Callee disposes of txp on both success and failure.
+ */
+int BCMFASTPATH
+dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	uint16 flowid;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	uint8 *pktdata = (uint8 *)PKTDATA(bus->dhd->osh, txp);
+	struct ether_header *eh = (struct ether_header *)pktdata;
+
+	if (!bus->dhd->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not initialized yet \n", __FUNCTION__));
+		goto toss;
+	}
+
+	flowid = dhd_flowid_find(bus->dhd, ifidx,
+		bus->dhd->flow_prio_map[(PKTPRIO(txp))],
+		eh->ether_shost, eh->ether_dhost);
+	if (flowid == FLOWID_INVALID) {
+		/* No ring yet: stash ifidx in the pkttag and let the deferred
+		 * worker create the flow ring and requeue the packet.
+		 */
+		DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
+		skb_queue_tail(&bus->orphan_list, txp);
+		queue_work(bus->tx_wq, &bus->create_flow_work);
+		return BCME_OK;
+	}
+
+	/* Validate flowid BEFORE indexing the flow ring table; the original
+	 * order dereferenced an out-of-range entry first.
+	 */
+	if (flowid >= bus->dhd->num_flow_rings) {
+		DHD_DATA(("%s: Dropping pkt, invalid flowid %d\n",
+			__FUNCTION__, flowid));
+		ret = BCME_ERROR;
+		goto toss;
+	}
+
+	DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), flowid);
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	DHD_DATA(("%s: pkt flowid %d, status %d active %d\n",
+		__FUNCTION__, flowid, flow_ring_node->status,
+		flow_ring_node->active));
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if ((!flow_ring_node->active) ||
+		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_DATA(("%s: Dropping pkt flowid %d, status %d active %d\n",
+			__FUNCTION__, flowid, flow_ring_node->status,
+			flow_ring_node->active));
+		ret = BCME_ERROR;
+		goto toss;
+	}
+
+	if (flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
+		/* Ring creation still in flight: defer this packet too. */
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
+		skb_queue_tail(&bus->orphan_list, txp);
+		queue_work(bus->tx_wq, &bus->create_flow_work);
+		return BCME_OK;
+	}
+
+	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		goto toss;
+	}
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+	return ret;
+
+toss:
+	DHD_DATA(("%s: Toss %d\n", __FUNCTION__, ret));
+	PKTCFREE(bus->dhd->osh, txp, TRUE);
+	return ret;
+}
+#endif /* !PCIE_TX_DEFERRAL */
void
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
+#ifdef CONFIG_ARCH_MSM8994
+/* Translate a TCM offset into an address relative to the current BAR1
+ * window.  When the offset falls outside the window (per bar1_win_mask),
+ * the window base is moved first via the PCI BAR1 config register.
+ * On non-MSM8994 builds this collapses to the identity (offset as-is).
+ */
+static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset)
+{
+	uint new_bar1_wbase = 0;
+	ulong address = 0;
+
+	new_bar1_wbase = (uint)offset & bus->bar1_win_mask;
+	if (bus->bar1_win_base != new_bar1_wbase) {
+		/* Retarget the BAR1 window and remember the new base. */
+		bus->bar1_win_base = new_bar1_wbase;
+		dhdpcie_bus_cfg_set_bar1_win(bus, bus->bar1_win_base);
+		DHD_ERROR(("%s: offset=%lx, switch bar1_win_base to %x\n",
+			__FUNCTION__, offset, bus->bar1_win_base));
+	}
+
+	address = offset - bus->bar1_win_base;
+
+	return address;
+}
+#else
+#define dhd_bus_cmn_check_offset(x, y) y
+#endif /* CONFIG_ARCH_MSM8994 */
+
/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
- *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint8)data;
}
uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
volatile uint8 data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+ data = R_REG(bus->dhd->osh,
+ (volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
#else
- data = *(volatile uint8 *)(bus->tcm + offset);
+ data = *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
#endif
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
return data;
}
void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
- *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint32)data;
}
void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
- *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint16)data;
}
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
- *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint64)data;
}
uint16
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
volatile uint16 data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+ data = R_REG(bus->dhd->osh,
+ (volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
#else
- data = *(volatile uint16 *)(bus->tcm + offset);
+ data = *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
#endif
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
return data;
}
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
volatile uint32 data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+ data = R_REG(bus->dhd->osh,
+ (volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
#else
- data = *(volatile uint32 *)(bus->tcm + offset);
+ data = *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
#endif
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
return data;
}
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
volatile uint64 data;
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+ data = R_REG(bus->dhd->osh,
+ (volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
#else
- data = *(volatile uint64 *)(bus->tcm + offset);
+ data = *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
#endif
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
return data;
}
break;
}
default:
- printf("Maximum one argument supported\n");
+ printf("%s: Maximum one argument supported\n", __FUNCTION__);
break;
}
bytes += sprintf(p + bytes, "\n");
}
if (total == 0U) {
- printf("buzzz_dump total<%u> done\n", total);
+ printf("%s: buzzz_dump total<%u> done\n", __FUNCTION__, total);
return;
} else {
- printf("buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
+ printf("%s: buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
total, part2, part1);
}
log = (void*)((size_t)log + buzzz_p->log_sz);
}
- printf("buzzz_dump done.\n");
+ printf("%s: buzzz_dump done.\n", __FUNCTION__);
}
int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
return BCME_UNSUPPORTED;
}
if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
- printf("Page memory allocation failure\n");
+ printf("%s: Page memory allocation failure\n", __FUNCTION__);
goto done;
}
if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
- printf("Buzzz memory allocation failure\n");
+ printf("%s: Buzzz memory allocation failure\n", __FUNCTION__);
goto done;
}
dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
(uint8 *)buzzz_p, sizeof(buzzz_t));
if (buzzz_p->count == 0) {
- printf("Empty dongle BUZZZ trace\n\n");
+ printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
goto done;
}
if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
- printf("Counters<%u> mismatch\n", buzzz_p->counters);
+ printf("%s: Counters<%u> mismatch\n", __FUNCTION__, buzzz_p->counters);
goto done;
}
/* Allocate memory for trace buffer and format strings */
buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
if (buffer_p == NULL) {
- printf("Buffer memory allocation failure\n");
+ printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
goto done;
}
/* Fetch the trace and format strings */
dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
(uint8 *)buffer_p, buzzz_p->buffer_sz);
/* Process and display the trace using formatted output */
- printf("<#cycle> <#instruction> <#ctr3> <event information>\n");
+ printf("%s: <#cycle> <#instruction> <#ctr3> <event information>\n", __FUNCTION__);
dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
- printf("----- End of dongle BUZZZ Trace -----\n\n");
+ printf("%s: ----- End of dongle BUZZZ Trace -----\n\n", __FUNCTION__);
MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
}
}
if (i >= pcie_serdes_spinwait) {
- DHD_ERROR(("pcie_mdiosetblock: timed out\n"));
+ DHD_ERROR(("%s: pcie_mdiosetblock: timed out\n", __FUNCTION__));
return FALSE;
}
DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
bus->dhd->up = FALSE;
if (bus->dhd->busstate != DHD_BUS_DOWN) {
- dhd_prot_clear(dhdp);
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
dhd_os_wd_timer(dhdp, 0);
dhd_bus_stop(bus, TRUE);
-#ifdef CONFIG_ARCH_MSM
+ dhd_prot_clear(dhdp);
+ dhd_clear(dhdp);
dhd_bus_release_dongle(bus);
-#endif /* CONFIG_ARCH_MSM */
dhdpcie_bus_free_resource(bus);
bcmerror = dhdpcie_bus_disable_device(bus);
if (bcmerror) {
#endif /* CONFIG_ARCH_MSM */
bus->dhd->busstate = DHD_BUS_DOWN;
} else {
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
dhd_prot_clear(dhdp);
-#ifdef CONFIG_ARCH_MSM
+ dhd_clear(dhdp);
dhd_bus_release_dongle(bus);
-#endif /* CONFIG_ARCH_MSM */
dhdpcie_bus_free_resource(bus);
bcmerror = dhdpcie_bus_disable_device(bus);
if (bcmerror) {
/* Powering On */
DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
- while (retry--) {
+ while (--retry) {
bcmerror = dhdpcie_bus_clock_start(bus);
if (!bcmerror) {
DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
bcmerror = dhdpcie_bus_dongle_attach(bus);
if (bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_dongle_attach: %d\n",
+ DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bcmerror = dhd_bus_request_irq(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
__FUNCTION__, bcmerror));
goto done;
}
{
uint val;
if (!PCIE_GEN2(bus->sih)) {
- DHD_ERROR(("supported only in pcie gen2\n"));
+ DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
bcmerror = BCME_ERROR;
break;
}
bcopy(&val, arg, sizeof(int32));
}
else {
- DHD_ERROR(("pcie2_mdioop failed.\n"));
+ DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
bcmerror = BCME_ERROR;
}
break;
}
case IOV_SVAL(IOV_PCIESERDESREG):
if (!PCIE_GEN2(bus->sih)) {
- DHD_ERROR(("supported only in pcie gen2\n"));
+ DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
bcmerror = BCME_ERROR;
break;
}
if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
- DHD_ERROR(("pcie2_mdioop failed.\n"));
+ DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
bcmerror = BCME_ERROR;
}
break;
/* Can change it only during initialization/FW download */
if (bus->dhd->busstate == DHD_BUS_DOWN) {
if ((int_val > 3) || (int_val < 0)) {
- DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+ DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
bcmerror = BCME_BADARG;
} else {
bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
bcopy(&int_val, arg, val_size);
break;
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
default:
bcmerror = BCME_UNSUPPORTED;
break;
dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
{
if (bus->dhd == NULL) {
- DHD_ERROR(("bus not inited\n"));
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
return 0;
}
if (bus->dhd->prot == NULL) {
- DHD_ERROR(("prot is not inited\n"));
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
return 0;
}
if (bus->dhd->busstate != DHD_BUS_DATA) {
- DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
+ DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
return 0;
}
dhdmsgbuf_lpbk_req(bus->dhd, len);
}
int
-dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
{
int timeleft;
int rc = 0;
if (bus->dhd == NULL) {
- DHD_ERROR(("bus not inited\n"));
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (bus->dhd->prot == NULL) {
- DHD_ERROR(("prot is not inited\n"));
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
- DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
+ DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (bus->dhd->dongle_reset)
return -EIO;
- if (state == (bus->dhd->busstate == DHD_BUS_SUSPEND)) /* Set to same state */
+ if (bus->suspended == state) /* Set to same state */
return BCME_OK;
if (state) {
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_set_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
bus->wait_for_d3_ack = 0;
+ bus->suspended = TRUE;
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT);
dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
if (bus->wait_for_d3_ack) {
/* Got D3 Ack. Suspend the bus */
- rc = dhdpcie_pci_suspend_resume(bus->dev, state);
- bus->dhd->busstate = DHD_BUS_SUSPEND;
+ if (dhd_os_check_wakelock_all(bus->dhd)) {
+ DHD_ERROR(("%s: Suspend failed because of wakelock\n", __FUNCTION__));
+ bus->dev->current_state = PCI_D3hot;
+ pci_set_master(bus->dev);
+ rc = pci_set_power_state(bus->dev, PCI_D0);
+ if (rc) {
+ DHD_ERROR(("%s: pci_set_power_state failed:"
+ " current_state[%d], ret[%d]\n",
+ __FUNCTION__, bus->dev->current_state, rc));
+ }
+ bus->suspended = FALSE;
+ bus->dhd->busstate = DHD_BUS_DATA;
+ rc = BCME_ERROR;
+ } else {
+ dhdpcie_bus_intr_disable(bus);
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+ }
} else if (timeleft == 0) {
DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
- return -ETIMEDOUT;
+ bus->dev->current_state = PCI_D3hot;
+ pci_set_master(bus->dev);
+ rc = pci_set_power_state(bus->dev, PCI_D0);
+ if (rc) {
+ DHD_ERROR(("%s: pci_set_power_state failed:"
+ " current_state[%d], ret[%d]\n",
+ __FUNCTION__, bus->dev->current_state, rc));
+ }
+ bus->suspended = FALSE;
+ bus->dhd->busstate = DHD_BUS_DATA;
+ rc = -ETIMEDOUT;
}
bus->wait_for_d3_ack = 1;
- }
- else {
+ } else {
/* Resume */
- rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+ bus->suspended = FALSE;
bus->dhd->busstate = DHD_BUS_DATA;
-
+ dhdpcie_bus_intr_enable(bus);
}
-#ifdef EXYNOS5433_PCIE_WAR
- exynos_pcie_clear_l1_exit();
-#endif /* EXYNOS5433_PCIE_WAR */
return rc;
}
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
{
if (bus->dhd == NULL) {
- DHD_ERROR(("bus not inited\n"));
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (bus->dhd->prot == NULL) {
- DHD_ERROR(("prot is not inited\n"));
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (bus->dhd->busstate != DHD_BUS_DATA) {
- DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
+ DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
if (len < 5 || len > 4194296) {
- DHD_ERROR(("len is too small or too large\n"));
+ DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
return BCME_ERROR;
}
return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
/* Implement read back and verify later */
#ifdef DHD_DEBUG
/* Verify NVRAM bytes */
- DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
if (!nvram_ularray)
return BCME_NOMEM;
phys_size += bus->dongle_ram_base;
/* adjust to the user specified RAM */
- DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
phys_size, bus->ramsize));
- DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
varaddr, varsize));
varsize = ((phys_size - 4) - varaddr);
varsizew = htol32(varsizew);
}
- DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+ DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
/* Write the length token to the last word */
bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
return bcmerror;
}
+#ifndef BCMPCIE_OOB_HOST_WAKE
+/* loop through the capability list and see if the pcie capabilty exists */
+/*
+ * dhdpcie_find_pci_capability: walk the device's PCI config-space
+ * capability list looking for the capability whose ID matches req_cap_id.
+ *
+ * Returns the config-space offset of the matching capability, or 0 when
+ * the header is not a normal (type 0) header, no capability pointer is
+ * present, or the requested ID is not in the list.
+ *
+ * NOTE(review): 'osh' is not referenced directly here; read_pci_cfg_byte
+ * presumably uses it implicitly — confirm against its definition.
+ */
+uint8
+dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
+{
+	uint8 cap_id;
+	uint8 cap_ptr = 0;
+	uint8 byte_val;
+
+	/* Only a normal (type 0) config header carries a capability list */
+	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
+		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
+		goto end;
+	}
+
+	/* Status register advertises whether a capability pointer exists */
+	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
+		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
+		goto end;
+	}
+
+	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+	/* check if the capability pointer is 0x00 */
+	if (cap_ptr == 0x00) {
+		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
+		goto end;
+	}
+
+	/* Walk the linked capability list until the requested ID is found
+	 * or the next pointer terminates the list (0x00).
+	 */
+
+	cap_id = read_pci_cfg_byte(cap_ptr);
+
+	while (cap_id != req_cap_id) {
+		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+		if (cap_ptr == 0x00) break;
+		cap_id = read_pci_cfg_byte(cap_ptr);
+	}
+
+end:
+	return cap_ptr;
+}
+
+/*
+ * dhdpcie_pme_active: enable or disable PME# generation for the device.
+ *
+ * Locates the PCI Power Management capability and rewrites its
+ * Control/Status register: the PME status bit is always written
+ * (write-1-to-clear, so any latched status is cleared), and PME_EN is
+ * set or cleared according to 'enable'. Returns silently when the PM
+ * capability is absent.
+ */
+void
+dhdpcie_pme_active(osl_t *osh, bool enable)
+{
+	uint8 cap_ptr;
+	uint32 pme_csr;
+
+	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+	if (!cap_ptr) {
+		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+		return;
+	}
+
+	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
+	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
+
+	/* PME status is write-1-to-clear: setting it clears latched status */
+	pme_csr |= PME_CSR_PME_STAT;
+	if (enable) {
+		pme_csr |= PME_CSR_PME_EN;
+	} else {
+		pme_csr &= ~PME_CSR_PME_EN;
+	}
+
+	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
/* Add bus dump output to a buffer */
void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
next = dll_next_p(item);
flow_ring_node = dhd_constlist_to_flowring(item);
- ASSERT(flow_ring_node->active);
dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
}
}
{
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
- DHD_ERROR(("mailbox communication not supported\n"));
+ DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
return;
}
if (bus->db1_for_mb) {
/* this is a pcie core register, not the config regsiter */
- DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
+ DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
}
else {
- DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
+ DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
}
si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
} else {
/* this is a pcie core register, not the config regsiter */
- DHD_INFO(("writing a door bell to the device\n"));
+ DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
}
}
return 0;
}
- if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
- resched = TRUE;
- DHD_ERROR(("%s : pcie is still in suspend state!!!\n", __FUNCTION__));
- OSL_DELAY(20 * 1000); /* 20ms */
- return resched;
- }
-
intstatus = bus->intstatus;
if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
intstatus |= newstatus;
bus->intstatus = 0;
if (intstatus & I_MB) {
- dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+ resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
}
} else {
/* this is a PCIE core register..not a config register... */
newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
intstatus |= (newstatus & bus->def_intmask);
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, intstatus, intstatus);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, newstatus, newstatus);
if (intstatus & bus->def_intmask) {
- dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+ resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
intstatus &= ~bus->def_intmask;
}
}
- dhdpcie_bus_intr_enable(bus);
+ if (!resched) {
+ // terence 20150420: no need to enable interrupt if busstate is down
+ if (bus->dhd->busstate) {
+ dhdpcie_bus_intr_enable(bus);
+ }
+ }
return resched;
}
if (cur_h2d_mb_data != 0) {
uint32 i = 0;
- DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
+ DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
while ((i++ < 100) && cur_h2d_mb_data) {
OSL_DELAY(10);
dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
}
if (i >= 100)
- DHD_ERROR(("waited 1ms for the dngl to ack the previous mb transaction\n"));
+ DHD_ERROR(("%s: waited 1ms for the dngl to ack the previous mb transaction\n", __FUNCTION__));
}
dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
- DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+ DHD_INFO(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
/* what should we do */
- DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+ DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
- DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+ DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
}
if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
/* what should we do */
- DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+ DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
}
if (d2h_mb_data & D2H_DEV_D3_ACK) {
/* what should we do */
}
}
if (d2h_mb_data & D2H_DEV_FWHALT) {
- DHD_INFO(("FW trap has happened\n"));
+ DHD_INFO(("%s: FW trap has happened\n", __FUNCTION__));
#ifdef DHD_DEBUG
dhdpcie_checkdied(bus, NULL, 0);
#endif
}
}
-static void
+static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
+ bool resched = FALSE;
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
/* Msg stream interrupt */
if (intstatus & I_BIT1) {
- dhdpci_bus_read_frames(bus);
+ resched = dhdpci_bus_read_frames(bus);
} else if (intstatus & I_BIT0) {
/* do nothing for Now */
}
else {
if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
dhdpcie_handle_mb_data(bus);
+
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ goto exit;
+ }
+
if (intstatus & PCIE_MB_D2H_MB_MASK) {
- dhdpci_bus_read_frames(bus);
+ resched = dhdpci_bus_read_frames(bus);
}
}
+exit:
+ return resched;
}
/* Decode dongle to host message stream */
-static void
+static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
+ bool more = FALSE;
+
/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
-
dhd_prot_process_ctrlbuf(bus->dhd);
+ /* Unlock to give chance for resp to be handled */
+ DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+ DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
/* update the flow ring cpls */
dhd_update_txflowrings(bus->dhd);
- dhd_prot_process_msgbuf_txcpl(bus->dhd);
-
- dhd_prot_process_msgbuf_rxcpl(bus->dhd);
+ /* With heavy TX traffic, we could get a lot of TxStatus
+ * so add bound
+ */
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
+ /* With heavy RX traffic, this routine potentially could spend some time
+ * processing RX frames without RX bound
+ */
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+
+ return more;
}
static int
(addr > shaddr)) {
DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
__FUNCTION__, addr));
- DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+ DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
return BCME_ERROR;
} else {
bus->shared_addr = (ulong)addr;
- DHD_ERROR(("PCIe shared addr read took %u usec "
- "before dongle is ready\n", tmo.elapsed));
+ DHD_ERROR(("%s: PCIe shared addr read took %u usec "
+ "before dongle is ready\n", __FUNCTION__, tmo.elapsed));
}
/* Read hndrte_shared structure */
if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
sizeof(pciedev_shared_t))) < 0) {
- DHD_ERROR(("Failed to read PCIe shared struct,"
- "size read %d < %d\n", rv, (int)sizeof(pciedev_shared_t)));
+ DHD_ERROR(("%s: Failed to read PCIe shared struct,"
+ "size read %d < %d\n", __FUNCTION__, rv, (int)sizeof(pciedev_shared_t)));
return rv;
}
bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
- DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
+ DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
DHD_ERROR(("%s: pcie_shared version %d in dhd "
} else
bus->txmode_push = FALSE;
}
- DHD_ERROR(("bus->txmode_push is set to %d\n", bus->txmode_push));
+ DHD_ERROR(("%s: bus->txmode_push is set to %d\n", __FUNCTION__, bus->txmode_push));
/* Does the FW support DMA'ing r/w indices */
if (sh->flags & PCIE_SHARED_DMA_INDEX) {
dhd_fillup_ring_sharedptr_info(bus, &ring_info);
bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
- DHD_INFO(("ring_info\n"));
+ DHD_INFO(("%s: ring_info\n", __FUNCTION__));
- DHD_ERROR(("max H2D queues %d\n", ltoh16(ring_info.max_sub_queues)));
+ DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__, ltoh16(ring_info.max_sub_queues)));
- DHD_INFO(("mail box address\n"));
- DHD_INFO(("h2d_mb_data_ptr_addr 0x%04x\n", bus->h2d_mb_data_ptr_addr));
- DHD_INFO(("d2h_mb_data_ptr_addr 0x%04x\n", bus->d2h_mb_data_ptr_addr));
+ DHD_INFO(("%s: mail box address\n", __FUNCTION__));
+ DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->h2d_mb_data_ptr_addr));
+ DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->d2h_mb_data_ptr_addr));
}
+
+ bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
+ DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode));
+
return BCME_OK;
}
/* Read ring mem and ring state ptr info from shared are in TCM */
bus->ring_sh[i].ring_mem_addr = tcm_memloc;
/* Update mem block */
tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
- DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
+ DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
i, bus->ring_sh[i].ring_mem_addr));
}
/* Tx flow Ring */
if (bus->txmode_push) {
bus->ring_sh[i].ring_mem_addr = tcm_memloc;
- DHD_INFO(("TX ring ring id %d ring mem addr 0x%04x \n",
+ DHD_INFO(("%s: TX ring ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
i, bus->ring_sh[i].ring_mem_addr));
}
}
h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
- DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
+ DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
}
/* Store d2h common ring write/read pointers */
d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
- DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+ DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
}
bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
- DHD_INFO(("txflow : idx %d write %x read %x \n", i,
+ DHD_INFO(("%s: txflow : idx %d write %x read %x \n", __FUNCTION__, i,
bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
} else {
for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
- DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
+ DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
+ __FUNCTION__, i,
bus->ring_sh[i].ring_state_w,
bus->ring_sh[i].ring_state_r));
}
}
}
}
+
/* Initialize bus module: prepare for communication w/dongle */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
return bus->txmode_push;
}
-void dhd_bus_clean_flow_ring(dhd_bus_t *bus, uint16 flowid)
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
void *pkt;
flow_queue_t *queue;
- flow_ring_node_t *flow_ring_node;
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
unsigned long flags;
- flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- ASSERT(flow_ring_node->flowid == flowid);
-
queue = &flow_ring_node->queue;
- /* Call Flow ring clean up */
- dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
- dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
- flow_ring_node->flowid);
-
- /* clean up BUS level info */
- DHD_QUEUE_LOCK(queue->lock, flags);
-
#ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
* when there is a newly coming packet from network stack.
*/
dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
+
+ /* clean up BUS level info */
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
/* Flush all pending packets in the queue, if any */
while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTFREE(bus->dhd->osh, pkt, TRUE);
}
ASSERT(flow_queue_empty(queue));
- DHD_QUEUE_UNLOCK(queue->lock, flags);
-
+ flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
flow_ring_node->active = FALSE;
-
dll_delete(&flow_ring_node->list);
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Call Flow ring clean up */
+ dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
+ dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+ flow_ring_node->flowid);
+
}
/*
DHD_INFO(("%s :Flow create\n", __FUNCTION__));
/* Send Msg to device about flow ring creation */
- dhd_prot_flow_ring_create(bus->dhd, flow_ring_node);
-
- flow_ring_node->status = FLOW_RING_STATUS_PENDING;
-
- dll_prepend(&bus->const_flowring, &flow_ring_node->list);
+ if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
+ return BCME_NOMEM;
return BCME_OK;
}
dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
DHD_ERROR(("%s Flow create Response failure error status = %d \n",
__FUNCTION__, status));
/* Call Flow clean up */
- dhd_bus_clean_flow_ring(bus, flowid);
+ dhd_bus_clean_flow_ring(bus, flow_ring_node);
return;
}
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
dhd_bus_schedule_queue(bus, flowid, FALSE);
flow_ring_node = (flow_ring_node_t *)arg;
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
return BCME_ERROR;
}
+ flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
queue = &flow_ring_node->queue; /* queue associated with flow ring */
- DHD_QUEUE_LOCK(queue->lock, flags);
-
#ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
* when there is a newly coming packet from network stack.
}
ASSERT(flow_queue_empty(queue));
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Send Msg to device about flow ring deletion */
dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
- flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
return BCME_OK;
}
return;
}
/* Call Flow clean up */
- dhd_bus_clean_flow_ring(bus, flowid);
+ dhd_bus_clean_flow_ring(bus, flow_ring_node);
- flow_ring_node->status = FLOW_RING_STATUS_OPEN;
- flow_ring_node->active = FALSE;
return;
}
flow_ring_node = (flow_ring_node_t *)arg;
queue = &flow_ring_node->queue; /* queue associated with flow ring */
- DHD_QUEUE_LOCK(queue->lock, flags);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
#ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
}
ASSERT(flow_queue_empty(queue));
- DHD_QUEUE_UNLOCK(queue->lock, flags);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Send Msg to device about flow ring flush */
dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
dhdpcie_free_resource(bus);
}
+/* Bus-layer entry point: forward IRQ (re)registration for this bus to
+ * the PCIe-specific implementation. Returns its status code.
+ */
+int
+dhd_bus_request_irq(struct dhd_bus *bus)
+{
+	return dhdpcie_bus_request_irq(bus);
+}
+
bool
dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
return 0;
}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+/* Thin wrappers exposing the out-of-band host-wake interrupt primitives
+ * to bus-independent code; each forwards to the PCIe implementation on
+ * dhdp->bus (implementations not in view here).
+ */
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	return dhdpcie_oob_intr_register(dhdp->bus);
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+	dhdpcie_oob_intr_unregister(dhdp->bus);
+}
+
+/* Enable or disable the OOB host-wake interrupt line */
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+	dhdpcie_oob_intr_set(dhdp->bus, enable);
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pcie.h 491657 2014-07-17 06:29:40Z $
+ * $Id: dhd_pcie.h 506084 2014-10-02 15:34:59Z $
*/
#include <bcmpcie.h>
#include <hnd_cons.h>
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_ARCH_MSM8994
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
/* defines */
uint32 dma_rxoffset;
volatile char *regs; /* pci device memory va */
volatile char *tcm; /* pci device memory va */
+ uint32 tcm_size;
+#ifdef CONFIG_ARCH_MSM8994
+ uint32 bar1_win_base;
+ uint32 bar1_win_mask;
+#endif
osl_t *osh;
uint32 nvram_csm; /* Nvram checksum */
uint16 pollrate;
uint8 txmode_push;
uint32 max_sub_queues;
bool db1_for_mb;
-
+ bool suspended;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ struct msm_pcie_register_event pcie_event;
+ bool islinkdown;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#ifdef PCIE_TX_DEFERRAL
+ struct workqueue_struct *tx_wq;
+ struct work_struct create_flow_work;
+ struct work_struct delete_flow_work;
+ unsigned long *delete_flow_map;
+ struct sk_buff_head orphan_list;
+#endif /* PCIE_TX_DEFERRAL */
+ bool irq_registered;
} dhd_bus_t;
/* function declarations */
extern void dhdpcie_bus_unregister(void);
extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
-extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm);
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs,
+ volatile char* tcm, uint32 tcm_size);
extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern void dhdpcie_bus_remove_prep(struct dhd_bus *bus);
extern void dhdpcie_bus_release(struct dhd_bus *bus);
extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
extern void dhdpcie_free_irq(dhd_bus_t *bus);
extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
-extern int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state);
+extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
+#ifndef BCMPCIE_OOB_HOST_WAKE
+extern void dhdpcie_pme_active(osl_t *osh, bool enable);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_disable_device(dhd_bus_t *bus);
extern int dhdpcie_enable_device(dhd_bus_t *bus);
extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
#endif /* dhd_pcie_h */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pcie_linux.c 491657 2014-07-17 06:29:40Z $
+ * $Id: dhd_pcie_linux.c 506043 2014-10-02 12:29:45Z $
*/
#include <dhd_pcie.h>
#include <dhd_linux.h>
#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_ARCH_MSM8994
+#include <linux/msm_pcie.h>
+#else
#include <mach/msm_pcie.h>
#endif
+#endif /* CONFIG_ARCH_MSM */
#define PCI_CFG_RETRY 10
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
int irq;
char pciname[32];
struct pci_saved_state* state;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ void *os_cxt; /* Pointer to per-OS private data */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
} dhdpcie_info_t;
struct tasklet_struct tuning_tasklet;
};
+#ifdef BCMPCIE_OOB_HOST_WAKE
+/* Per-OS private state for the out-of-band host-wake interrupt */
+typedef struct dhdpcie_os_info {
+	int oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool oob_irq_registered;	/* OOB IRQ registration state */
+	bool oob_irq_enabled;	/* OOB IRQ enable state */
+	bool oob_irq_wake_enabled;	/* OOB IRQ wake-source arming state */
+	spinlock_t oob_irq_spinlock;	/* presumably guards the state above — confirm in dhdpcie_oob_intr_* */
+	void *dev;		/* handle to the underlying device */
+} dhdpcie_os_info_t;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
/* function declarations */
static int __devinit
static irqreturn_t dhdpcie_isr(int irq, void *arg);
/* OS Routine functions for PCI suspend/resume */
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+DEFINE_MUTEX(_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
+
static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state);
static int dhdpcie_pci_resume(struct pci_dev *dev);
int dhdpcie_init_succeeded = FALSE;
-static void dhdpcie_pme_active(struct pci_dev *pdev, bool enable)
-{
- uint16 pmcsr;
-
- pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
- /* Clear PME Status by writing 1 to it and enable PME# */
- pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
- if (!enable)
- pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
-
- pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
-}
-
static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
{
int ret = 0;
/* When firmware is not loaded do the PCI bus */
/* suspend/resume only */
if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
- !bus->dhd->dongle_reset) {
- ret = dhdpcie_pci_suspend_resume(bus->dev, state);
- return ret;
- }
+#ifdef CONFIG_MACH_UNIVERSAL5433
+ /* RB:34285 check_rev() : return 1 - new rev., 0 - old rev. */
+ (!check_rev() || (check_rev() && !bus->dhd->dongle_reset)))
+#else
+ !bus->dhd->dongle_reset)
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
+ {
+ ret = dhdpcie_pci_suspend_resume(bus, state);
+ return ret;
+ }
if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
- (bus->dhd->busstate == DHD_BUS_DATA))) {
+ (bus->dhd->busstate == DHD_BUS_DATA)) &&
+ (bus->suspended != state)) {
ret = dhdpcie_bus_suspend(bus, state);
}
{
int ret;
DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
- dhdpcie_pme_active(dev, TRUE);
pci_save_state(dev);
pci_enable_wake(dev, PCI_D0, TRUE);
pci_disable_device(dev);
printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
return err;
}
- dhdpcie_pme_active(dev, FALSE);
return err;
}
-int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state)
+int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state)
{
int rc;
+ struct pci_dev *dev = bus->dev;
- if (state)
+ if (state) {
+#ifndef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
rc = dhdpcie_suspend_dev(dev);
- else
+ } else {
rc = dhdpcie_resume_dev(dev);
+#ifndef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ }
return rc;
}
return -ENODEV;
}
+#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
+ /* disable async suspend */
+ device_disable_async_suspend(&pdev->dev);
+#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
+
DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
return 0;
}
osl_t *osh = NULL;
dhdpcie_info_t *pch = NULL;
dhd_bus_t *bus = NULL;
+#ifdef PCIE_TX_DEFERRAL
+ struct sk_buff *skb;
+#endif
DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
+ DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
+ }
+ else {
+ DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
+ }
+ mutex_lock(&_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
+
pch = pci_get_drvdata(pdev);
bus = pch->bus;
osh = pch->osh;
+#ifdef PCIE_TX_DEFERRAL
+ if (bus->tx_wq)
+ destroy_workqueue(bus->tx_wq);
+ skb = skb_dequeue(&bus->orphan_list);
+ while (skb) {
+ PKTCFREE(osh, skb, TRUE);
+ skb = skb_dequeue(&bus->orphan_list);
+ }
+#endif
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ if (bus)
+ msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ dhdpcie_bus_remove_prep(bus);
dhdpcie_bus_release(bus);
pci_disable_device(pdev);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* pcie os info detach */
+ MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
/* pcie info detach */
dhdpcie_detach(pch);
/* osl detach */
dhdpcie_init_succeeded = FALSE;
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&_dhd_sdio_mutex_lock_);
+ DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* MULTIPLE_SUPPLICANT */
+
DHD_TRACE(("%s Exit\n", __FUNCTION__));
return;
DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
return -1;
}
+ bus->irq_registered = TRUE;
DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
}
dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
- dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm_size =
+ (bar1_size < DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
}
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti)
+{
+ struct pci_dev *pdev = (struct pci_dev *)noti->user;
+ dhdpcie_info_t *pch = NULL;
+
+ if (pdev) {
+ pch = pci_get_drvdata(pdev);
+ if (pch) {
+ dhd_bus_t *bus = pch->bus;
+ if (bus) {
+ dhd_pub_t *dhd = bus->dhd;
+ if (dhd) {
+ DHD_ERROR(("%s: Event HANG send up "
+ "due to PCIe linkdown\n",
+ __FUNCTION__));
+ bus->islinkdown = TRUE;
+ DHD_OS_WAKE_LOCK(dhd);
+ dhd_os_check_hang(dhd, 0, -ETIMEDOUT);
+ }
+ }
+ }
+ }
+
+}
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef PCIE_TX_DEFERRAL
+static void dhd_pcie_create_flow_worker(struct work_struct *worker)
+{
+ dhd_bus_t *bus;
+ struct sk_buff *skb;
+ uint16 ifidx, flowid;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ bus = container_of(worker, dhd_bus_t, create_flow_work);
+ skb = skb_dequeue(&bus->orphan_list);
+ while (skb) {
+ ifidx = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb));
+ if (BCME_OK != dhd_flowid_update(bus->dhd, ifidx,
+ bus->dhd->flow_prio_map[(PKTPRIO(skb))], skb)) {
+ PKTCFREE(bus->dhd->osh, skb, TRUE);
+ skb = skb_dequeue(&bus->orphan_list);
+ continue;
+ }
+ flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb));
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ queue = &flow_ring_node->queue;
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ if ((flowid >= bus->dhd->num_flow_rings) ||
+ (!flow_ring_node->active) ||
+ (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ DHD_ERROR(("%s: Dropping pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ PKTCFREE(bus->dhd->osh, skb, TRUE);
+ skb = skb_dequeue(&bus->orphan_list);
+ continue;
+ }
+ if (BCME_OK != dhd_flow_queue_enqueue(bus->dhd, queue, skb)) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ PKTCFREE(bus->dhd->osh, skb, TRUE);
+ skb = skb_dequeue(&bus->orphan_list);
+ continue;
+ }
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ if (flow_ring_node->status == FLOW_RING_STATUS_OPEN)
+ dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+ skb = skb_dequeue(&bus->orphan_list);
+ }
+}
+
+static void dhd_pcie_delete_flow_worker(struct work_struct *worker)
+{
+ dhd_bus_t *bus;
+ uint16 flowid;
+
+ bus = container_of(worker, dhd_bus_t, delete_flow_work);
+ for_each_set_bit(flowid, bus->delete_flow_map, bus->dhd->num_flow_rings) {
+ clear_bit(flowid, bus->delete_flow_map);
+ dhd_bus_flow_ring_delete_response(bus, flowid, BCME_OK);
+ }
+}
+
+#endif /* PCIE_TX_DEFERRAL */
+
+#if defined(MULTIPLE_SUPPLICANT)
+extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif
+
int dhdpcie_init(struct pci_dev *pdev)
{
dhd_bus_t *bus = NULL;
dhdpcie_info_t *dhdpcie_info = NULL;
wifi_adapter_info_t *adapter = NULL;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
+ DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
+ }
+ else {
+ DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
+ }
+ mutex_lock(&_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
do {
/* osl attach */
dhdpcie_info->osh = osh;
dhdpcie_info->dev = pdev;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* allocate OS speicific structure */
+ dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
+ if (dhdpcie_osinfo == NULL) {
+ DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
+ __FUNCTION__));
+ break;
+ }
+ bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+ dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
+
+ /* Initialize host wake IRQ */
+ spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
+ /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */
+ dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
+ &dhdpcie_osinfo->oob_irq_flags);
+ if (dhdpcie_osinfo->oob_irq_num < 0) {
+ DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
/* Find the PCI resources, verify the */
/* vendor and device ID, map BAR regions and irq, update in structures */
if (dhdpcie_scan_resource(dhdpcie_info)) {
}
/* Bus initialization */
- bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm);
+ bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs,
+ dhdpcie_info->tcm, dhdpcie_info->tcm_size);
if (!bus) {
DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
break;
dhdpcie_info->bus = bus;
dhdpcie_info->bus->dev = pdev;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+ msm_pcie_register_event(&bus->pcie_event);
+ bus->islinkdown = FALSE;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
if (bus->intr) {
/* Register interrupt callback, but mask it (not operational yet). */
DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
"due to polling mode\n", __FUNCTION__));
}
+#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd
if (dhd_download_fw_on_driverload) {
if (dhd_bus_start(bus->dhd)) {
DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
break;
}
}
+#endif
/* set private data for pci_dev */
pci_set_drvdata(pdev, dhdpcie_info);
+#ifdef PCIE_TX_DEFERRAL
+ bus->tx_wq = create_singlethread_workqueue("bcmdhd_tx");
+ if (bus->tx_wq == NULL) {
+ DHD_ERROR(("%s workqueue creation failed\n", __FUNCTION__));
+ break;
+ }
+ INIT_WORK(&bus->create_flow_work, dhd_pcie_create_flow_worker);
+ INIT_WORK(&bus->delete_flow_work, dhd_pcie_delete_flow_worker);
+ skb_queue_head_init(&bus->orphan_list);
+#endif /* PCIE_TX_DEFERRAL */
+
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
if (dhd_register_if(bus->dhd, 0, TRUE)) {
dhdpcie_init_succeeded = TRUE;
+#if defined(MULTIPLE_SUPPLICANT)
+ wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&_dhd_sdio_mutex_lock_);
+ DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
+
DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
return 0; /* return SUCCESS */
if (bus)
dhdpcie_bus_release(bus);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ if (dhdpcie_osinfo)
+ MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
if (dhdpcie_info)
dhdpcie_detach(dhdpcie_info);
pci_disable_device(pdev);
osl_detach(osh);
dhdpcie_init_succeeded = FALSE;
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&_dhd_sdio_mutex_lock_);
+ DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
struct pci_dev *pdev = NULL;
DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
- if (bus) {
+ if (bus && bus->irq_registered) {
pdev = bus->dev;
free_irq(pdev->irq, bus);
+ bus->irq_registered = FALSE;
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
return;
dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
{
int ret = 0;
+#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
int options = 0;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
if (bus == NULL)
if (bus->dev == NULL)
return BCME_ERROR;
-#if defined(CONFIG_ARCH_MSM)
+#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
if (bus->islinkdown) {
options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
}
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
- NULL, NULL, options);
+ bus->dev, NULL, options);
if (bus->islinkdown && !ret) {
msm_pcie_recover_config(bus->dev);
if (bus->dhd)
}
#else
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
- NULL, NULL, 0);
+ bus->dev, NULL, 0);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (ret) {
DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
{
int ret = 0;
+#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
int options = 0;
-#endif
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
if (bus == NULL)
if (bus->dev == NULL)
return BCME_ERROR;
-#if defined(CONFIG_ARCH_MSM)
+#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
if (bus->islinkdown)
options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
- NULL, NULL, options);
+ bus->dev, NULL, options);
#else
ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
- NULL, NULL, 0);
+ bus->dev, NULL, 0);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (ret) {
DHD_ERROR(("Failed to stop PCIe link\n"));
if (pch == NULL)
return BCME_ERROR;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ /* Updated with pci_load_and_free_saved_state to compatible
+ * with kernel 3.14 or higher
+ */
+ if (pci_load_and_free_saved_state(bus->dev, &pch->state))
+ pci_disable_device(bus->dev);
+ else
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)))
if (pci_load_saved_state(bus->dev, pch->state))
pci_disable_device(bus->dev);
- else {
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ else
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and
+ * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ */
+ {
pci_restore_state(bus->dev);
ret = pci_enable_device(bus->dev);
if (!ret)
pci_set_master(bus->dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and
+ * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ */
if (ret)
pci_disable_device(bus->dev);
}
bus->regs = dhdpcie_info->regs;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
- dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm_size =
+ (bar1_size < DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
if (!dhdpcie_info->tcm) {
DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
}
bus->tcm = dhdpcie_info->tcm;
+ bus->tcm_size = dhdpcie_info->tcm_size;
DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
__FUNCTION__, dhdpcie_info->regs, bar0_addr));
bus->tcm = NULL;
}
}
+
+int
+dhdpcie_bus_request_irq(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+ int ret = 0;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+ dhdpcie_bus_intr_disable(bus);
+ ret = dhdpcie_request_irq(dhdpcie_info);
+ if (ret) {
+ DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
+{
+ unsigned long flags;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+ if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
+ (dhdpcie_osinfo->oob_irq_num > 0)) {
+ if (enable)
+ enable_irq(dhdpcie_osinfo->oob_irq_num);
+ else
+ disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
+ dhdpcie_osinfo->oob_irq_enabled = enable;
+ }
+ spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *data)
+{
+ dhd_bus_t *bus;
+ DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
+ bus = (dhd_bus_t *)data;
+ if (bus->dhd->up && bus->suspended) {
+ DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
+ }
+ return IRQ_HANDLED;
+}
+
+int dhdpcie_oob_intr_register(dhd_bus_t *bus)
+{
+ int err = 0;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ if (dhdpcie_osinfo->oob_irq_registered) {
+ DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
+ return -EBUSY;
+ }
+
+ if (dhdpcie_osinfo->oob_irq_num > 0) {
+ DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
+ (int)dhdpcie_osinfo->oob_irq_num,
+ (int)dhdpcie_osinfo->oob_irq_flags));
+ err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
+ dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
+ bus);
+ if (err) {
+ DHD_ERROR(("%s: request_irq failed with %d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+ err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+ if (!err)
+ dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
+ dhdpcie_osinfo->oob_irq_enabled = TRUE;
+ }
+
+ dhdpcie_osinfo->oob_irq_registered = TRUE;
+
+ return err;
+}
+
+void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
+{
+ int err = 0;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ if (!dhdpcie_osinfo->oob_irq_registered) {
+ DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
+ return;
+ }
+ if (dhdpcie_osinfo->oob_irq_num > 0) {
+ if (dhdpcie_osinfo->oob_irq_wake_enabled) {
+ err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+ if (!err)
+ dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+ }
+ if (dhdpcie_osinfo->oob_irq_enabled) {
+ disable_irq(dhdpcie_osinfo->oob_irq_num);
+ dhdpcie_osinfo->oob_irq_enabled = FALSE;
+ }
+ free_irq(dhdpcie_osinfo->oob_irq_num, bus);
+ }
+ dhdpcie_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
extern int dhd_pno_deinit(dhd_pub_t *dhd);
#endif
-#if (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT)
+#if defined(NDISVER)
+#if defined(PNO_SUPPORT)
+#if (NDISVER >= 0x0630)
extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
extern int dhd_pno_clean(dhd_pub_t *dhd);
-#endif /* (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT) */
+#endif /* #if (NDISVER >= 0x0630) */
+#endif /* #if defined(PNO_SUPPORT) */
+#endif /* #if defined(NDISVER) */
#endif /* __DHD_PNO_H__ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_proto.h 490409 2014-07-10 16:34:27Z $
+ * $Id: dhd_proto.h 499674 2014-08-29 21:56:23Z $
*/
#ifndef _dhd_proto_h_
#include <dhd_flowring.h>
#endif
+#define DEFAULT_IOCTL_RESP_TIMEOUT 2000
#ifndef IOCTL_RESP_TIMEOUT
-#define IOCTL_RESP_TIMEOUT 2000 /* In milli second default value for Production FW */
+/* In milli second default value for Production FW */
+#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT
#endif /* IOCTL_RESP_TIMEOUT */
#ifndef MFG_IOCTL_RESP_TIMEOUT
uint reorder_info_len, void **pkt, uint32 *free_buf_count);
#ifdef BCMPCIE
-extern int dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd);
-extern int dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd);
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound);
extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_sdio.c 489913 2014-07-08 18:57:48Z $
+ * $Id: dhd_sdio.c 506046 2014-10-02 12:40:12Z $
*/
#include <typedefs.h>
1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+#ifdef USE_CMD14
/* Add CMD14 Support */
dhdsdio_devcap_set(bus,
(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+#endif /* USE_CMD14 */
+
+ dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
int ret = BCME_ERROR;
osl_t *osh;
uint datalen, prec;
-#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+#if defined(DHD_TX_DUMP)
uint8 *dump_data;
uint16 protocol;
-#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+#endif /* DHD_TX_DUMP */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
BCM_REFERENCE(datalen);
#endif /* SDTEST */
-#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+#if defined(DHD_TX_DUMP)
dump_data = PKTDATA(osh, pkt);
dump_data += 4; /* skip 4 bytes header */
protocol = (dump_data[12] << 8) | dump_data[13];
DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
dump_data[14], dump_data[15], dump_data[30]));
}
-#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+#endif /* DHD_TX_DUMP */
#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
{
varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
varaddr = (bus->ramsize - 4) - varsize;
+ // terence 20150412: fix for nvram failed to download
+ if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+ bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+ varsize = varsize ? ROUNDUP(varsize, 64) : 0;
+ varaddr = (bus->ramsize - 64) - varsize;
+ }
+
varaddr += bus->dongle_ram_base;
if (bus->vars) {
} else
#endif /* BCMSDIOH_TXGLOM */
bus->txglom_enable = FALSE;
+ printk("%s: enable %d\n", __FUNCTION__, bus->txglom_enable);
}
int
if (bus->reqbussleep)
{
- dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_bussleep(bus, TRUE);
bus->reqbussleep = FALSE;
}
bus->readframes = FALSE;
#if defined(SDIO_ISR_THREAD)
DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
DHD_OS_WAKE_LOCK(bus->dhd);
- dhdsdio_dpc(bus);
+ /* terence 20150209: dpc should be scheded again if dpc_sched is TRUE or dhd_bus_txdata can
+ not schedule anymore because dpc_sched is TRUE now.
+ */
+ if (dhdsdio_dpc(bus)) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
return FALSE;
}
+#if defined(MULTIPLE_SUPPLICANT)
+extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif
+
static void *
dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
{
int ret;
dhd_bus_t *bus;
+ struct ether_addr ea_addr;
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
goto fail;
}
-#ifdef PROP_TXSTATUS
- // terence 20131215: disable_proptx should be set before dhd_attach
- if ((bus->sih->chip == BCM43362_CHIP_ID) || (bus->sih->chip == BCM4330_CHIP_ID)) {
- printf("%s: Disable prop_txstatus\n", __FUNCTION__);
- disable_proptx = 1;
- }
-#endif
-
/* Attach to the dhd/OS/network interface */
if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
/* if firmware path present try to download and bring up bus */
bus->dhd->hang_report = TRUE;
+#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd
if (dhd_download_fw_on_driverload) {
if ((ret = dhd_bus_start(bus->dhd)) != 0) {
DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
goto fail;
}
}
+#endif
+
+#ifdef GET_OTP_MAC_ENABLE
+ if (dhd_conf_get_mac(bus->dhd, sdh, ea_addr.octet)) {
+ DHD_TRACE(("%s: Can not read MAC address\n", __FUNCTION__));
+ } else
+ memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+#endif /* GET_OTP_MAC_ENABLE */
+
/* Ok, have the per-port tell the stack we're open for business */
if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
#if defined(MULTIPLE_SUPPLICANT)
+ wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
__FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
}
bus->use_rxchain = (bool)bus->sd_rxchain;
+ if (bus->dhd->conf->use_rxchain >= 0) {
+ printf("%s: set use_rxchain %d from config.txt\n", __FUNCTION__, bus->dhd->conf->use_rxchain);
+ bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain;
+ }
+ /* Setting default Glom size */
+ if (bus->dhd->conf->txglomsize >= 0) {
+ printf("%s: set txglomsize %d from config.txt\n", __FUNCTION__, bus->dhd->conf->txglomsize);
+ bus->txglomsize = bus->dhd->conf->txglomsize;
+ }
bus->txinrx_thres = CUSTOM_TXINRX_THRES;
/* TX first in dhdsdio_readframes() */
bus->dotxinrx = TRUE;
{
int ret;
-
DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
__FUNCTION__, bus->fw_path, bus->nv_path));
DHD_OS_WAKE_LOCK(bus->dhd);
/* External conf takes precedence if specified */
dhd_conf_preinit(bus->dhd);
- dhd_conf_read_config(bus->dhd);
+ dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
- dhd_conf_set_fw_path(bus->dhd, bus->fw_path);
- dhd_conf_set_nv_path(bus->dhd, bus->nv_path);
dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path);
dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path);
/* Download image */
while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ // terence 20150412: fix for firmware failed to download
+ if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+ bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+ if (len%64 != 0) {
+ memset(memptr+len, 0, len%64);
+ len += (64 - len%64);
+ }
+ }
if (len < 0) {
DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
bcmerror = BCME_ERROR;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_wlfc.c 490028 2014-07-09 05:58:25Z $
+ * $Id: dhd_wlfc.c 501046 2014-09-06 01:25:16Z $
*
*/
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
-#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
-#endif /* DHDTCPACK_SUPPRESS */
/*
}
static int
-_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal,
uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
{
uint32 wl_pktinfo = 0;
dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
struct bdc_header *h;
+ void *p = *packet;
if (skip_wlfc_hdr)
goto push_bdc_hdr;
h->flags2 = 0;
h->dataOffset = dataOffset >> 2;
BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+ *packet = p;
return BCME_OK;
}
* STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
* have their own entry.
*/
- if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
- iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+ if ((DHD_IF_ROLE_STA(iftype) || ETHER_ISMULTI(dstn)) &&
(ctx->destination_entries.interfaces[ifid].occupied)) {
entry = &ctx->destination_entries.interfaces[ifid];
}
/* pkt in delayed q, so fake push BDC header for
* dhd_tcpack_check_xmit() and dhd_txcomplete().
*/
- _dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0, 0, 0, TRUE);
+ _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE);
/* This packet is about to be freed, so remove it from tcp_ack_info_tbl
* This must be one of...
if (p) {
PKTPULL(ctx->osh, p, dummylen);
DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
- _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+ _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
#ifdef PROP_TXSTATUS_DEBUG
static int
_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
- wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+ wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot)
{
int rc = BCME_OK;
int hslot = WLFC_HANGER_MAXITEMS;
bool send_tim_update = FALSE;
uint32 htod = 0;
uint16 htodseq = 0;
- uint8 free_ctr;
+ uint8 free_ctr, flags = 0;
int gen = 0xff;
dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+ void * p = *packet;
*slot = hslot;
return BCME_ERROR;
}
- WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
- WL_TXSTATUS_SET_HSLOT(htod, hslot);
- WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
- WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
- WL_TXSTATUS_SET_GENERATION(htod, gen);
- DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
-
+ flags = WLFC_PKTFLAG_PKTFROMHOST;
if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
/*
Indicate that this packet is being sent in response to an
explicit request from the firmware side.
*/
- WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
- } else {
- WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+ flags |= WLFC_PKTFLAG_PKT_REQUESTED;
+ }
+ if (pkt_is_dhcp(ctx->osh, p)) {
+ flags |= WLFC_PKTFLAG_PKT_FORCELOWRATE;
}
- rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+ WL_TXSTATUS_SET_HSLOT(htod, hslot);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WL_TXSTATUS_SET_FLAGS(htod, flags);
+ WL_TXSTATUS_SET_GENERATION(htod, gen);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+ rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update,
entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
if (rc == BCME_OK) {
DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
}
}
*slot = hslot;
+ *packet = p;
return rc;
}
}
ASSERT(entry);
+ if (entry->transit_count < 0) {
+ DHD_ERROR(("Error: %s():%d transit_count %d < 0\n",
+ __FUNCTION__, __LINE__, entry->transit_count));
+ continue;
+ }
if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
!(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
} else {
if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) {
/* free slot */
- ASSERT(item->state != WLFC_HANGER_ITEM_STATE_FREE);
+ if (item->state == WLFC_HANGER_ITEM_STATE_FREE)
+ DHD_ERROR(("Error: %s():%d get multi TXSTATUS for one packet???\n",
+ __FUNCTION__, __LINE__));
item->state = WLFC_HANGER_ITEM_STATE_FREE;
}
}
/* pkt in delayed q, so fake push BDC header for
* dhd_tcpack_check_xmit() and dhd_txcomplete().
*/
- _dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0,
+ _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0,
0, 0, TRUE);
#ifdef DHDTCPACK_SUPPRESS
if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ entry->suppressed = FALSE;
+ entry->transit_count = 0;
+ entry->suppr_transit_count = 0;
+ }
+
+#ifdef P2PONEINT
+ if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) ||
+ ((action == eWLFC_MAC_ENTRY_ACTION_UPDATE) && (entry->psq.num_prec == 0))) {
+#else
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+#endif
dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
entry->occupied = 0;
- entry->suppressed = 0;
entry->state = WLFC_STATE_CLOSE;
- entry->requested_credit = 0;
- entry->transit_count = 0;
- entry->suppr_transit_count = 0;
memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
if (entry->next) {
credit count.
*/
DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
- rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+ rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p,
commit_info->needs_hdr, &hslot);
if (rc == BCME_OK) {
}
/* allocate space to track txstatus propagated from firmware */
- dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO,
+ sizeof(athost_wl_status_info_t));
if (dhd->wlfc_state == NULL) {
rc = BCME_NOMEM;
goto exit;
if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
if (wlfc->hanger == NULL) {
- MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+ sizeof(athost_wl_status_info_t));
dhd->wlfc_state = NULL;
rc = BCME_NOMEM;
goto exit;
if (pktbuf) {
uint32 htod = 0;
WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
- _dhd_wlfc_pushheader(ctx, pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+ _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE);
if (fcommit(commit_ctx, pktbuf))
PKTFREE(ctx->osh, pktbuf, TRUE);
rc = BCME_OK;
/* free top structure */
- MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+ sizeof(athost_wl_status_info_t));
dhd->wlfc_state = NULL;
dhd->proptxstatus_mode = hostreorder ?
WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
/*
* $Copyright Open 2009 Broadcom Corporation$
-* $Id: dhd_wlfc.h 490028 2014-07-09 05:58:25Z $
+* $Id: dhd_wlfc.h 501046 2014-09-06 01:25:16Z $
*
*/
#ifndef __wlfc_host_driver_definitions_h__
uint8 send_tim_signal;
uint8 mac_handle;
/* Number of packets at dongle for this entry. */
- uint transit_count;
+ int transit_count;
/* Numbe of suppression to wait before evict from delayQ */
- uint suppr_transit_count;
+ int suppr_transit_count;
/* flag. TRUE when in suppress state */
uint8 suppressed;
q->len--;
- // terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit
- if (q->len == 0) {
- q->head = NULL;
- q->tail = NULL;
- }
+ // terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit
+ if (q->len == 0) {
+ q->head = NULL;
+ q->tail = NULL;
+ }
if (prec_out)
*prec_out = prec;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmmsgbuf.h 490808 2014-07-12 00:33:13Z $
+ * $Id: bcmmsgbuf.h 499474 2014-08-28 21:30:10Z $
*/
#ifndef _bcmmsgbuf_h_
#define _bcmmsgbuf_h_
uint16 metadata_buf_len;
/* provided data buffer len to receive data */
uint16 data_len;
- uint32 rsvd;
+ uint32 flag2;
} host_txbuf_post_t;
#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT
#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK
+#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK 0x01
+#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT 0
+
/* H2D Txpost ring work items */
typedef union txbuf_submit_item {
host_txbuf_post_t txpost;
* Explains the shared area between host and dongle
* $Copyright Open 2005 Broadcom Corporation$
*
- * $Id: bcmpcie.h 490808 2014-07-12 00:33:13Z $
+ * $Id: bcmpcie.h 497456 2014-08-19 15:06:33Z $
*/
#ifndef _bcmpcie_h_
#define PCIE_SHARED_EVT_SEQNUM 0x08000
#define PCIE_SHARED_DMA_INDEX 0x10000
+/* D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM */
+#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000
+#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000
+#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
+ (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
+
#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0
#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1
#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc.h 408158 2013-06-17 22:15:35Z $
+ * $Id: bcmsdh_sdmmc.h 496576 2014-08-13 15:04:56Z $
*/
#ifndef __BCMSDH_SDMMC_H__
/* private bus modes */
#define SDIOH_MODE_SD4 2
#define CLIENT_INTR 0x100 /* Get rid of this! */
-#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
+#define SDIOH_SDMMC_MAX_SG_ENTRIES (SDPCM_MAXGLOM_SIZE+2)
struct sdioh_info {
osl_t *osh; /* osh handler */
extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#ifdef GLOBAL_SDMMC_INSTANCE
typedef struct _BCMSDH_SDMMC_INSTANCE {
sdioh_info_t *sd;
struct sdio_func *func[SDIOD_MAX_IOFUNCS];
} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+#endif
+
#endif /* __BCMSDH_SDMMC_H__ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmutils.h 490808 2014-07-12 00:33:13Z $
+ * $Id: bcmutils.h 504037 2014-09-22 19:03:15Z $
*/
#ifndef _bcmutils_h_
/* buffer length for ethernet address from bcm_ether_ntoa() */
#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */
+static INLINE uint32 /* 32bit word aligned xor-32 */
+bcm_compute_xor32(volatile uint32 *u32, int num_u32)
+{
+ int i;
+ uint32 xor32 = 0;
+ for (i = 0; i < num_u32; i++)
+ xor32 ^= *(u32 + i);
+ return xor32;
+}
+
/* crypto utility function */
/* 128-bit xor: *dst = *src1 xor *src2. dst1, src1 and src2 may have any alignment */
static INLINE void
*/
extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16);
/* Allocate a unique 16bit id */
extern uint16 id16_map_alloc(void * id16_map_hndl);
static INLINE dll_t *
dll_prev_p(dll_t *node_p)
{
- return (node_p)->next_p;
+ return (node_p)->prev_p;
}
#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
-/* given a proprietary MCS, get number of spatial streams */
-#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+#define GET_PRO_PRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
- : ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
+ : ((mcs) == 32 ? 1 : GET_PRO_PRIETARY_11N_MCS_NSS(mcs)))
#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
#define EPI_MINOR_VERSION 201
-#define EPI_RC_NUMBER 34
+#define EPI_RC_NUMBER 59
#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 1, 201, 34, 0
+#define EPI_VERSION 1, 201, 59, 0
-#define EPI_VERSION_NUM 0x01c92200
+#define EPI_VERSION_NUM 0x01c93b00
-#define EPI_VERSION_DEV 1.201.34
+#define EPI_VERSION_DEV 1.201.59
/* Driver Version String, ASCII, 32 chars max */
-#define EPI_VERSION_STR "1.201.34.2 (r491657)"
+#define EPI_VERSION_STR "1.201.59.3 (r506368)"
#endif /* _epivers_h_ */
fi
# Following SVNURL should be expanded on checkout
- SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_201_34/src/include/epivers.sh $'
+ SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_201_59/src/include/epivers.sh $'
# .gclient_info is created by gclient checkout/sync steps
# and contains "DEPS='<deps-url1> <deps-url2> ..." entry
--- /dev/null
+/*
+ * EVENT_LOG system definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: event_log.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _EVENT_LOG_H_
+#define _EVENT_LOG_H_
+
+#include <typedefs.h>
+
+/* Set a maximum number of sets here. It is not dynamic for
+ * efficiency of the EVENT_LOG calls.
+ */
+#define NUM_EVENT_LOG_SETS 4
+#define EVENT_LOG_SET_BUS 0
+#define EVENT_LOG_SET_WL 1
+#define EVENT_LOG_SET_PSM 2
+#define EVENT_LOG_SET_DBG 3
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL 0 /* Special null tag */
+#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */
+#define EVENT_LOG_TAG_BUS_OOB 2
+#define EVENT_LOG_TAG_BUS_STATE 3
+#define EVENT_LOG_TAG_BUS_PROTO 4
+#define EVENT_LOG_TAG_BUS_CTL 5
+#define EVENT_LOG_TAG_BUS_EVENT 6
+#define EVENT_LOG_TAG_BUS_PKT 7
+#define EVENT_LOG_TAG_BUS_FRAME 8
+#define EVENT_LOG_TAG_BUS_DESC 9
+#define EVENT_LOG_TAG_BUS_SETUP 10
+#define EVENT_LOG_TAG_BUS_MISC 11
+#define EVENT_LOG_TAG_SRSCAN 22
+#define EVENT_LOG_TAG_PWRSTATS_INFO 23
+#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
+#define EVENT_LOG_TAG_UCODE_FIFO 27
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29
+#define EVENT_LOG_TAG_SCAN_ERROR 30
+#define EVENT_LOG_TAG_SCAN_WARN 31
+#define EVENT_LOG_TAG_MPF_ERR 32
+#define EVENT_LOG_TAG_MPF_WARN 33
+#define EVENT_LOG_TAG_MPF_INFO 34
+#define EVENT_LOG_TAG_MPF_DEBUG 35
+#define EVENT_LOG_TAG_EVENT_INFO 36
+#define EVENT_LOG_TAG_EVENT_ERR 37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR 38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39
+#define EVENT_LOG_TAG_IOCTL_LOG 40
+#define EVENT_LOG_TAG_PFN_ERR 41
+#define EVENT_LOG_TAG_PFN_WARN 42
+#define EVENT_LOG_TAG_PFN_INFO 43
+#define EVENT_LOG_TAG_PFN_DEBUG 44
+#define EVENT_LOG_TAG_BEACON_LOG 45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
+#define EVENT_LOG_TAG_TRACE_CHANSW 47
+#define EVENT_LOG_TAG_PCI_ERROR 48
+#define EVENT_LOG_TAG_PCI_TRACE 49
+#define EVENT_LOG_TAG_PCI_WARN 50
+#define EVENT_LOG_TAG_PCI_INFO 51
+#define EVENT_LOG_TAG_PCI_DBG 52
+#define EVENT_LOG_TAG_PCI_DATA 53
+#define EVENT_LOG_TAG_PCI_RING 54
+#define EVENT_LOG_TAG_MAX 55 /* Set to the same value of last tag, not last tag + 1 */
+/* Note: New event should be added/reserved in trunk before adding it to branches */
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE 0
+#define EVENT_LOG_TAG_FLAG_LOG 0x80
+#define EVENT_LOG_TAG_FLAG_PRINT 0x40
+#define EVENT_LOG_TAG_FLAG_MASK 0x3f
+
+/* logstrs header */
+#define LOGSTRS_MAGIC 0x4C4F4753
+#define LOGSTRS_VERSION 0x1
+
+/* We make sure that the block size will fit in a single packet
+ * (allowing for a bit of overhead on each packet
+ */
+#define EVENT_LOG_MAX_BLOCK_SIZE 1400
+#define EVENT_LOG_PSM_BLOCK 0x200
+#define EVENT_LOG_BUS_BLOCK 0x200
+#define EVENT_LOG_DBG_BLOCK 0x100
+
+/*
+ * There are multiple levels of objects define here:
+ * event_log_set - a set of buffers
+ * event log groups - every event log call is part of just one. All
+ * event log calls in a group are handled the
+ * same way. Each event log group is associated
+ * with an event log set or is off.
+ */
+
+#ifndef __ASSEMBLER__
+
+/* On the external system where the dumper is we need to make sure
+ * that these types are the same size as they are on the ARM that
+ * produced them
+ */
+#ifdef EVENT_LOG_DUMPER
+#define _EL_BLOCK_PTR uint32
+#define _EL_TYPE_PTR uint32
+#define _EL_SET_PTR uint32
+#define _EL_TOP_PTR uint32
+#else
+#define _EL_BLOCK_PTR struct event_log_block *
+#define _EL_TYPE_PTR uint32 *
+#define _EL_SET_PTR struct event_log_set **
+#define _EL_TOP_PTR struct event_log_top *
+#endif /* EVENT_LOG_DUMPER */
+
+/* Each event log entry has a type. The type is the LAST word of the
+ * event log. The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+ struct {
+ uint8 tag; /* Event_log entry tag */
+ uint8 count; /* Count of 4-byte entries */
+ uint16 fmt_num; /* Format number */
+ };
+ uint32 t; /* Type cheat */
+} event_log_hdr_t;
+
+/* Event log sets (a logical circular buffer) consist of one or more
+ * event_log_blocks. The blocks themselves form a logical circular
+ * list. The log entries are placed in each event_log_block until it
+ * is full. Logging continues with the next event_log_block in the
+ * event_set until the last event_log_block is reached and then
+ * logging starts over with the first event_log_block in the
+ * event_set.
+ */
+typedef struct event_log_block {
+ _EL_BLOCK_PTR next_block;
+ _EL_BLOCK_PTR prev_block;
+ _EL_TYPE_PTR end_ptr;
+
+ /* Start of packet sent for log tracing */
+ uint16 pktlen; /* Size of rest of block */
+ uint16 count; /* Logtrace counter */
+ uint32 timestamp; /* Timestamp at start of use */
+ uint32 event_logs;
+} event_log_block_t;
+
+/* There can be multiple event_sets with each logging a set of
+ * associated events (i.e, "fast" and "slow" events).
+ */
+typedef struct event_log_set {
+ _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */
+ _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */
+ _EL_BLOCK_PTR logtrace_block; /* next block traced */
+ _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */
+ _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */
+ uint32 blockcount; /* Number of blocks */
+ uint16 logtrace_count; /* Last count for logtrace */
+ uint16 blockfill_count; /* Fill count for logtrace */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+} event_log_set_t;
+
+/* Top data structure for access to everything else */
+typedef struct event_log_top {
+ uint32 magic;
+#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */
+ uint32 version;
+#define EVENT_LOG_VERSION 1
+ uint32 num_sets;
+ uint32 logstrs_size; /* Size of lognums + logstrs area */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+ _EL_SET_PTR sets; /* Ptr to array of <num_sets> set ptrs */
+} event_log_top_t;
+
+/* Data structure of Keeping the Header from logstrs.bin */
+typedef struct {
+ uint32 logstrs_size; /* Size of the file */
+ uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+ uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+ uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+ uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_header_t;
+
+
+#ifndef EVENT_LOG_DUMPER
+
+#ifndef EVENT_LOG_COMPILE
+
+/* Null define if no tracing */
+#define EVENT_LOG(format, ...)
+
+#else /* EVENT_LOG_COMPILE */
+
+/* The first few are special because they can be done more efficiently
+ * this way and they are the common case. Once there are too many
+ * parameters the code size starts to be an issue and a loop is better
+ */
+#define _EVENT_LOG0(tag, fmt_num) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG1(tag, fmt_num, t1) \
+ event_log1(tag, fmt_num, t1)
+#define _EVENT_LOG2(tag, fmt_num, t1, t2) \
+ event_log2(tag, fmt_num, t1, t2)
+#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) \
+ event_log3(tag, fmt_num, t1, t2, t3)
+#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, t1, t2, t3, t4)
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+/* Hack to make the proper routine call when variadic macros get
+ * passed. Note the max of 15 arguments. More than that can't be
+ * handled by the event_log entries anyways so best to catch it at compile
+ * time
+ */
+
+#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \
+ _A, _B, _C, _D, _E, _F, N, ...) F ## N
+
+#define _EVENT_LOG(tag, fmt, ...) \
+ static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \
+ static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
+ _EVENT_LOG_VA_NUM_ARGS(_EVENT_LOG, ##__VA_ARGS__, \
+ F, E, D, C, B, A, 9, 8, \
+ 7, 6, 5, 4, 3, 2, 1, 0) \
+ (tag, (int) &fmtnum , ## __VA_ARGS__); \
+
+
+#define EVENT_LOG_FAST(tag, fmt, ...) \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if (tag_flag != 0) { \
+ _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \
+ } \
+ }
+
+#define EVENT_LOG_COMPACT(tag, fmt, ...) \
+ if (1) { \
+ _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \
+ }
+
+#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__)
+
+#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
+
+#define EVENT_DUMP event_log_buffer
+
+extern uint8 *event_log_tag_sets;
+
+#include <siutils.h>
+
+extern int event_log_init(si_t *sih);
+extern int event_log_set_init(si_t *sih, int set_num, int size);
+extern int event_log_set_expand(si_t *sih, int set_num, int size);
+extern int event_log_set_shrink(si_t *sih, int set_num, int size);
+extern int event_log_tag_start(int tag, int set_num, int flags);
+extern int event_log_tag_stop(int tag);
+extern int event_log_get(int set_num, int buflen, void *buf);
+extern uint8 * event_log_next_logtrace(int set_num);
+
+extern void event_log0(int tag, int fmtNum);
+extern void event_log1(int tag, int fmtNum, uint32 t1);
+extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2);
+extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3);
+extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
+extern void event_logn(int num_args, int tag, int fmtNum, ...);
+
+extern void event_log_time_sync(void);
+extern void event_log_buffer(int tag, uint8 *buf, int size);
+
+#endif /* EVENT_LOG_COMPILE */
+
+#endif /* EVENT_LOG_DUMPER */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _EVENT_LOG_H_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: linux_osl.h 491170 2014-07-15 06:23:58Z $
+ * $Id: linux_osl.h 503131 2014-09-17 12:16:08Z $
*/
#ifndef _linux_osl_h_
extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
extern void* osl_get_bus_handle(osl_t *osh);
-#ifdef EXYNOS5433_PCIE_WAR
-extern void exynos_pcie_set_l1_exit(void);
-extern void exynos_pcie_clear_l1_exit(void);
-#endif /* EXYNOS5433_PCIE_WAR */
/* Global ASSERT type */
extern uint32 g_assert_type;
#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+#if defined(BCMPCIE)
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
+#define DMA_ALLOC_CONSISTENT_STATIC(osh, size, align, tot, pap, dmah, idx) \
+ osl_dma_alloc_consistent_static((osh), (size), (align), (tot), (pap), (idx))
+#define DMA_FREE_CONSISTENT_STATIC(osh, va, size, pa, dmah, idx) \
+ osl_dma_free_consistent_static((osh), (void*)(va), (size), (pa), (idx))
+
+extern void *osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align,
+ uint *tot, dmaaddr_t *pap, uint16 idx);
+extern void osl_dma_free_consistent_static(osl_t *osh, void *va, uint size, dmaaddr_t pa,
+ uint16 idx);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
+#endif /* BCMPCIE */
+
extern uint osl_dma_consistent_align(void);
extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
uint *tot, dmaaddr_t *pap);
/* register access macros */
-#ifdef EXYNOS5433_PCIE_WAR
-#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, \
- ({ \
- __typeof(*(r)) __osl_v; \
- exynos_pcie_set_l1_exit(); \
- switch (sizeof(*(r))) { \
- case sizeof(uint8): __osl_v = \
- readb((volatile uint8*)(r)); break; \
- case sizeof(uint16): __osl_v = \
- readw((volatile uint16*)(r)); break; \
- case sizeof(uint32): __osl_v = \
- readl((volatile uint32*)(r)); break; \
- } \
- exynos_pcie_clear_l1_exit(); \
- __osl_v; \
- }), \
- OSL_READ_REG(osh, r)) \
-)
-#else
#define R_REG(osh, r) (\
SELECT_BUS_READ(osh, \
({ \
}), \
OSL_READ_REG(osh, r)) \
)
-#endif /* EXYNOS5433_PCIE_WAR */
-#ifdef EXYNOS5433_PCIE_WAR
#define W_REG(osh, r, v) do { \
- exynos_pcie_set_l1_exit(); \
SELECT_BUS_WRITE(osh, \
switch (sizeof(*(r))) { \
case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
}, \
(OSL_WRITE_REG(osh, r, v))); \
- exynos_pcie_clear_l1_exit(); \
} while (0)
-#else
-#define W_REG(osh, r, v) do { \
- SELECT_BUS_WRITE(osh, \
- switch (sizeof(*(r))) { \
- case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
- case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
- case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
- }, \
- (OSL_WRITE_REG(osh, r, v))); \
- } while (0)
-#endif /* EXYNOS5433_PCIE_WAR */
#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
extern void bzero(void *b, size_t len);
#endif /* ! BCMDRIVER */
+typedef struct sec_cma_info {
+ struct sec_mem_elem *sec_alloc_list;
+ struct sec_mem_elem *sec_alloc_list_tail;
+} sec_cma_info_t;
+
+#ifdef BCM_SECURE_DMA
+
+#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
+ osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset))
+#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \
+ osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah))
+#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
+ osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma))
+#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
+ osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
+#define SECURE_DMA_UNMAP_ALL(osh, pcma) \
+osl_sec_dma_unmap_all((osh), (pcma))
+
+#if defined(__ARM_ARCH_7A__)
+#define ACP_WAR_ENAB() 0
+#define ACP_WIN_LIMIT 0
+#define arch_is_coherent() 0
+
+#define CMA_BUFSIZE_4K 4096
+#define CMA_BUFSIZE_2K 2048
+#define CMA_BUFSIZE_512 512
+
+#define CMA_BUFNUM 2048
+#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */
+#define SEC_CMA_COHERENT_MAX 32
+#define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX)
+#define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM)
+#define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK)
+#define CONT_ARMREGION 0x02 /* Region CMA */
+#else
+#define CONT_MIPREGION 0x00 /* To access the MIPs mem, Not yet... */
+#endif /* !defined __ARM_ARCH_7A__ */
+
+#define SEC_DMA_ALIGN (1<<16)
+typedef struct sec_mem_elem {
+ size_t size;
+ int direction;
+ phys_addr_t pa_cma; /* physical address */
+ void *va; /* virtual address of driver pkt */
+ dma_addr_t dma_handle; /* bus address assign by linux */
+ void *vac; /* virtual address of cma buffer */
+ struct sec_mem_elem *next;
+} sec_mem_elem_t;
+
+extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset);
+extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah);
+extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size,
+ int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info);
+extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
+ void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset);
+extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info);
+
+#endif /* BCM_SECURE_DMA */
#endif /* _linux_osl_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: osl.h 474639 2014-05-01 23:52:31Z $
+ * $Id: osl.h 503131 2014-09-17 12:16:08Z $
*/
#ifndef _osl_h_
#define PKTFRAGISCHAINED(osh, i) (0)
/* TRIM Tail bytes from lfrag */
#define PKTFRAG_TRIM_TAILBYTES(osh, p, len) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+#ifdef BCM_SECURE_DMA
+#define SECURE_DMA_ENAB(osh) (1)
+#else
+
+#define SECURE_DMA_ENAB(osh) (0)
+#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)})
+#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0
+#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)})
+#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset)
+#define SECURE_DMA_UNMAP_ALL(osh, pcma)
+
+#endif
#endif /* _osl_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: pcicfg.h 465082 2014-03-26 17:37:28Z $
+ * $Id: pcicfg.h 506084 2014-10-02 15:34:59Z $
*/
#ifndef _h_pcicfg_
#define PCIBAR_PREFETCH 0x8
#define PCIBAR_MEM32_MASK 0xFFFFFF80
-/* pci config status reg has a bit to indicate that capability ptr is present */
-
-#define PCI_CAPPTR_PRESENT 0x0010
-
typedef struct _pci_config_regs {
uint16 vendor;
uint16 device;
#define MINSZPCR 64 /* offsetof (dev_dep[0] */
#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT 0x0010
+
/* A structure for the config registers is nice, but in most
* systems the config space is not memory mapped, so we need
* field offsetts. :-(
PCI_XOR_OTHER = 0x80
} pci_xor_subclasses;
-/* Header types */
-#define PCI_HEADER_MULTI 0x80
-#define PCI_HEADER_MASK 0x7f
-typedef enum {
- PCI_HEADER_NORMAL,
- PCI_HEADER_BRIDGE,
- PCI_HEADER_CARDBUS
-} pci_header_types;
-
-
/* Overlay for a PCI-to-PCI bridge */
#define PPB_RSVDA_MAX 2
uint8 dev_dep[192];
} ppb_config_regs;
+/* Everything below is BRCM HND proprietary */
+
+
+/* Brcm PCI configuration registers */
+#define cap_list rsvd_a[0]
+#define bar0_window dev_dep[0x80 - 0x40]
+#define bar1_window dev_dep[0x84 - 0x40]
+#define sprom_control dev_dep[0x88 - 0x40]
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
/* PCI CAPABILITY DEFINES */
#define PCI_CAP_POWERMGMTCAP_ID 0x01
} pcie_enhanced_caphdr;
-/* Everything below is BRCM HND proprietary */
-
-
-/* Brcm PCI configuration registers */
-#define cap_list rsvd_a[0]
-#define bar0_window dev_dep[0x80 - 0x40]
-#define bar1_window dev_dep[0x84 - 0x40]
-#define sprom_control dev_dep[0x88 - 0x40]
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */
#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */
#define PCI_SPROM_CONTROL 0x88 /* sprom property control */
#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */
-#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */
+#define PCI_LINK_CTRL 0xbc /* PCI link control register */
+#define PCI_DEV_STAT_CTRL2 0xd4 /* PCI device status control 2 register */
+#define PCIE_LTR_MAX_SNOOP 0x1b4 /* PCIE LTRMaxSnoopLatency */
+#define PCI_L1SS_CTRL 0x248 /* The L1 PM Substates Control register */
+#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control 2 register */
/* Private Registers */
#define PCI_STAT_CTRL 0xa80
#define PCI_STAT_TA 0x08000000 /* target abort status */
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+/* Header types */
+#define PCI_HEADER_MULTI 0x80
+#define PCI_HEADER_MASK 0x7f
+typedef enum {
+ PCI_HEADER_NORMAL,
+ PCI_HEADER_BRIDGE,
+ PCI_HEADER_CARDBUS
+} pci_header_types;
+
#define PCI_CONFIG_SPACE_SIZE 256
+
+#define DWORD_ALIGN(x) (x & ~(0x03))
+#define BYTE_POS(x) (x & 0x3)
+#define WORD_POS(x) (x & 0x1)
+
+#define BYTE_SHIFT(x) (8 * BYTE_POS(x))
+#define WORD_SHIFT(x) (16 * WORD_POS(x))
+
+#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
+#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
+
+#define read_pci_cfg_byte(a) \
+ (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
+
+#define read_pci_cfg_word(a) \
+ (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
+
+#define write_pci_cfg_byte(a, val) do { \
+ uint32 tmpval; \
+ tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \
+ val << BYTE_POS(a); \
+ OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+ } while (0)
+
+#define write_pci_cfg_word(a, val) do { \
+ uint32 tmpval; \
+ tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \
+ val << WORD_POS(a); \
+ OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+ } while (0)
+
#endif /* _h_pcicfg_ */
*
* Fundamental types and constants relating to 802.11
*
- * $Id: 802.11.h 469158 2014-04-09 21:31:31Z $
+ * $Id: 802.11.h 495738 2014-08-08 03:36:17Z $
*/
#ifndef _802_11_H_
BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
uint8 len; /* IE length */
- uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 oui[3];
uint8 type; /* type of this IE */
uint16 cap; /* DPT capabilities */
} BWL_POST_PACKED_STRUCT;
#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */
#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */
-/**
- * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since afterburner is not
- * used anymore. Checks for BRF_ABCAP to stay compliant with 'old' images in the field.
- */
#define GET_BRF_PROP_11N_MCS(brcm_ie) \
(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
/* QoS map */
#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */
-/* BCM proprietary IE type for AIBSS */
#define BCM_AIBSS_IE_TYPE 56
/* This marks the end of a packed structure section. */
*
* Dependencies: proto/bcmeth.h
*
- * $Id: bcmevent.h 490387 2014-07-10 15:12:52Z $
+ * $Id: bcmevent.h 505096 2014-09-26 12:49:04Z $
*
*/
#define WLC_E_REASON_RMC_AR_LOST 1
#define WLC_E_REASON_RMC_AR_NO_ACK 2
+#ifdef WLTDLS
+/* TDLS Action Category code */
+#define TDLS_AF_CATEGORY 12
+/* Wi-Fi Display (WFD) Vendor Specific Category */
+/* used for WFD Tunneled Probe Request and Response */
+#define TDLS_VENDOR_SPECIFIC 127
+/* TDLS Action Field Values */
+#define TDLS_ACTION_SETUP_REQ 0
+#define TDLS_ACTION_SETUP_RESP 1
+#define TDLS_ACTION_SETUP_CONFIRM 2
+#define TDLS_ACTION_TEARDOWN 3
+#define WLAN_TDLS_SET_PROBE_WFD_IE 11
+#define WLAN_TDLS_SET_SETUP_WFD_IE 12
+#endif
+
/* GAS event data */
typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wpa.h 450928 2014-01-23 14:13:38Z $
+ * $Id: wpa.h 492853 2014-07-23 17:20:34Z $
*/
#ifndef _proto_wpa_h_
#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
#define WPA2_PMKID_COUNT_LEN 2
+#define RSN_GROUPMANAGE_CIPHER_LEN 4
#ifdef BCMWAPI_WAI
#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
/*
* $Copyright Open 2009 Broadcom Corporation$
-* $Id: wlfc_proto.h 455301 2014-02-13 12:42:13Z $
+* $Id: wlfc_proto.h 499510 2014-08-28 23:40:47Z $
*
*/
#ifndef __wlfc_proto_definitions_h__
#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
-#define WLFC_PKTFLAG_PKTFROMHOST 0x01
-#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01 /* packet originated from host side */
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 /* packet requested by firmware side */
+#define WLFC_PKTFLAG_PKT_FORCELOWRATE 0x04 /* force low rate for this packet */
#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
#define WL_TXSTATUS_STATUS_SHIFT 24
/* b[7:5] -reuse guard, b[4:0] -value */
#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
-#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
- (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
-
-#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
- ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
-
-
#define WLFC_MAX_PENDING_DATALEN 120
/* host is free to discard the packet */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wlioctl.h 490639 2014-07-11 08:31:53Z $
+ * $Id: wlioctl.h 504503 2014-09-24 11:28:56Z $
*/
#ifndef _wlioctl_h_
#include <bcmwifi_rates.h>
#include <devctrl_if/wlioctl_defs.h>
-#if 0 && (NDISVER >= 0x0600)
-#include <proto/bcmipv6.h>
-#endif
#ifndef LINUX_POSTMOGRIFY_REMOVAL
#include <bcm_mpool_pub.h>
uint32 bphy_badplcp;
} wl_delta_stats_t;
+
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+/* structure to store per-rate rx statistics */
+typedef struct wl_scb_rx_rate_stats {
+ uint32 rx1mbps[2]; /* packets rx at 1Mbps */
+ uint32 rx2mbps[2]; /* packets rx at 2Mbps */
+ uint32 rx5mbps5[2]; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps[2]; /* packets rx at 6Mbps */
+ uint32 rx9mbps[2]; /* packets rx at 9Mbps */
+ uint32 rx11mbps[2]; /* packets rx at 11Mbps */
+ uint32 rx12mbps[2]; /* packets rx at 12Mbps */
+ uint32 rx18mbps[2]; /* packets rx at 18Mbps */
+ uint32 rx24mbps[2]; /* packets rx at 24Mbps */
+ uint32 rx36mbps[2]; /* packets rx at 36Mbps */
+ uint32 rx48mbps[2]; /* packets rx at 48Mbps */
+ uint32 rx54mbps[2]; /* packets rx at 54Mbps */
+} wl_scb_rx_rate_stats_t;
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
typedef struct {
* Dongle pattern matching filter.
*/
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE 2
+/* Cache first matched rx pkt (to be queried by the host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+/* Ports only filter mode */
+#define PKT_FILTER_MODE_PORTS_ONLY 16
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \
uint32 count;
} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t;
+#define MAX_ROAMOFFL_BSSID_NUM 100
+
+typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list {
+ int cnt;
+ struct ether_addr bssid[1];
+} BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t;
+
/* no default structure packing */
#include <packed_section_end.h>
uint8 id;
} BWL_POST_PACKED_STRUCT;
-#if 0 && (NDISVER >= 0x0600)
-/* Return values */
-#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */
-#define ND_REQ_SINK 0x2 /* Input packet should be discarded */
-#define ND_FORCE_FORWARD 0X3 /* For the dongle to forward req to HOST */
-
-
-/* Neighbor Solicitation Response Offload IOVAR param */
-typedef BWL_PRE_PACKED_STRUCT struct nd_param {
- struct ipv6_addr host_ip[2];
- struct ipv6_addr solicit_ip;
- struct ipv6_addr remote_ip;
- uint8 host_mac[ETHER_ADDR_LEN];
- uint32 offload_id;
-} BWL_POST_PACKED_STRUCT nd_param_t;
-#endif
typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh {
uint32 pfn_alert_thresh; /* time in ms */
typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params {
uint16 scan_time;
uint16 home_time;
+ uint16 ms_intvl; /* interval between merge scan */
+ uint16 ms_dur; /* duration of merge scan */
uint16 chspec_num;
chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */
} BWL_POST_PACKED_STRUCT nan_scan_params_t;
wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
} wl_roam_prof_band_t;
+/* Data structures for Interface Create/Remove */
+
+#define WL_INTERFACE_CREATE_VER (0)
+
+/*
+ * The flags filed of the wl_interface_create is designed to be
+ * a Bit Mask. As of now only Bit 0 and Bit 1 are used as mentioned below.
+ * The rest of the bits can be used, incase we have to provide
+ * more information to the dongle
+ */
+
+/*
+ * Bit 0 of flags field is used to inform whether the interface requested to
+ * be created is STA or AP.
+ * 0 - Create a STA interface
+ * 1 - Create an AP interface
+ */
+#define WL_INTERFACE_CREATE_STA (0 << 0)
+#define WL_INTERFACE_CREATE_AP (1 << 0)
+
+/*
+ * Bit 1 of flags field is used to inform whether MAC is present in the
+ * data structure or not.
+ * 0 - Ignore mac_addr field
+ * 1 - Use the mac_addr field
+ */
+#define WL_INTERFACE_MAC_DONT_USE (0 << 1)
+#define WL_INTERFACE_MAC_USE (1 << 1)
+
+typedef struct wl_interface_create {
+ uint16 ver; /* version of this struct */
+ uint32 flags; /* flags that defines the operation */
+ struct ether_addr mac_addr; /* Optional Mac address */
+} wl_interface_create_t;
+
+typedef struct wl_interface_info {
+ uint16 ver; /* version of this struct */
+ struct ether_addr mac_addr; /* MAC address of the interface */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */
+ uint8 bsscfgidx; /* source bsscfg index */
+} wl_interface_info_t;
+
/* no default structure packing */
#include <packed_section_end.h>
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: linux_osl.c 490846 2014-07-12 13:08:59Z $
+ * $Id: linux_osl.c 503131 2014-09-17 12:16:08Z $
*/
#define LINUX_PORT
+#ifdef BCM_SECURE_DMA
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <asm/io.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <asm/memory.h>
+#if defined(__ARM_ARCH_7A__)
+#include <arch/arm/include/asm/tlbflush.h>
+#include <arch/arm/mm/mm.h>
+#endif
+#include <linux/brcmstb/cma_driver.h>
+#endif /* BCM_SECURE_DMA */
+
#include <linux/fs.h>
#ifdef BCM47XX_ACP_WAR
extern spinlock_t l2x0_reg_lock;
#endif
+#if defined(BCMPCIE)
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
+#include <bcmpcie.h>
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
+#endif /* BCMPCIE */
+
#define PCI_CFG_RETRY 10
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
static bcm_static_pkt_t *bcm_static_skb = 0;
+#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
+#define STATIC_BUF_FLOWRING_SIZE ((PAGE_SIZE)*(7))
+#define STATIC_BUF_FLOWRING_NUM 42
+#define RINGID_TO_FLOWID(idx) ((idx) + (BCMPCIE_H2D_COMMON_MSGRINGS) \
+ - (BCMPCIE_H2D_TXFLOWRINGID))
+typedef struct bcm_static_flowring_buf {
+ spinlock_t flowring_lock;
+ void *buf_ptr[STATIC_BUF_FLOWRING_NUM];
+ unsigned char buf_use[STATIC_BUF_FLOWRING_NUM];
+} bcm_static_flowring_buf_t;
+
+bcm_static_flowring_buf_t *bcm_static_flowring = 0;
+#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
+
void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
int ctrace_num;
#endif /* BCMDBG_CTRACE */
uint32 flags; /* If specific cases to be handled in the OSL */
+#ifdef BCM_SECURE_DMA
+ struct cma_dev *cma;
+ struct sec_mem_elem *sec_list_512;
+ struct sec_mem_elem *sec_list_base_512;
+ struct sec_mem_elem *sec_list_2048;
+ struct sec_mem_elem *sec_list_base_2048;
+ struct sec_mem_elem *sec_list_4096;
+ struct sec_mem_elem *sec_list_base_4096;
+ phys_addr_t contig_base;
+ void *contig_base_va;
+ phys_addr_t contig_base_alloc;
+ void *contig_base_alloc_va;
+ phys_addr_t contig_base_alloc_coherent;
+ void *contig_base_alloc_coherent_va;
+ phys_addr_t contig_delta_va_pa;
+ struct {
+ phys_addr_t pa;
+ void *va;
+ bool avail;
+ } sec_cma_coherent[SEC_CMA_COHERENT_MAX];
+
+#endif /* BCM_SECURE_DMA */
+
};
+#ifdef BCM_SECURE_DMA
+phys_addr_t g_contig_delta_va_pa;
+static void osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn);
+static int osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn);
+static void osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn);
+static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
+ bool iscache, bool isdecr);
+static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
+static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
+ sec_mem_elem_t **list);
+static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
+ void *sec_list_base);
+static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
+ int direction, struct sec_cma_info *ptr_cma_info, uint offset);
+static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
+static void osl_sec_dma_init_consistent(osl_t *osh);
+static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
+ ulong *pap);
+static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
+#endif /* BCM_SECURE_DMA */
#define OSL_PKTTAG_CLEAR(p) \
do { \
/* Array bounds covered by ASSERT in osl_attach */
return linuxbcmerrormap[-bcmerror];
}
-#ifdef SHARED_OSL_CMN
+
osl_t *
+#ifdef SHARED_OSL_CMN
osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
-{
#else
-osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
+#endif /* SHARED_OSL_CMN */
{
+#ifndef SHARED_OSL_CMN
void **osl_cmn = NULL;
#endif /* SHARED_OSL_CMN */
osl_t *osh;
osh->pub.pkttag = pkttag;
osh->bustype = bustype;
osh->magic = OS_HANDLE_MAGIC;
+#ifdef BCM_SECURE_DMA
+
+ osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);
+
+#ifdef BCM47XX_CA9
+ osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
+ phys_to_page((u32)osh->contig_base_alloc),
+ CMA_DMA_DESC_MEMBLOCK, TRUE, TRUE);
+#else
+ osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
+ phys_to_page((u32)osh->contig_base_alloc),
+ CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
+#endif /* BCM47XX_CA9 */
+
+ osh->contig_base_alloc_coherent = osh->contig_base_alloc;
+ osl_sec_dma_init_consistent(osh);
+
+ osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
+
+ osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
+ phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
+ osh->contig_base_va = osh->contig_base_alloc_va;
+
+ /*
+ * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
+ * osh->sec_list_base_512 = osh->sec_list_512;
+ * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
+ * osh->sec_list_base_2048 = osh->sec_list_2048;
+ */
+ osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096);
+ osh->sec_list_base_4096 = osh->sec_list_4096;
+
+#endif /* BCM_SECURE_DMA */
switch (bustype) {
case PCI_BUS:
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
- if (!bcm_static_buf && adapter) {
- if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
- 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
- printk("can not alloc static buf!\n");
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- kfree(osh);
- return -ENOMEM;
- }
- else
- printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+ if (!bcm_static_buf && adapter) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+ 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
+ else
+ printk("alloc static buf at %p!\n", bcm_static_buf);
- sema_init(&bcm_static_buf->static_sem, 1);
+ sema_init(&bcm_static_buf->static_sem, 1);
- bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
- }
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+ }
#ifdef BCMSDIO
- if (!bcm_static_skb && adapter) {
- int i;
- void *skb_buff_ptr = 0;
- bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
- skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
- if (!skb_buff_ptr) {
- printk("cannot alloc static buf!\n");
- bcm_static_buf = NULL;
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- kfree(osh);
- return -ENOMEM;
- }
+ if (!bcm_static_skb && adapter) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+ if (!skb_buff_ptr) {
+ printk("cannot alloc static buf!\n");
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
- bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
- (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
- for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
- bcm_static_skb->pkt_use[i] = 0;
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+ (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
+ for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
+ bcm_static_skb->pkt_use[i] = 0;
- sema_init(&bcm_static_skb->osl_pkt_sem, 1);
- }
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+ }
#endif /* BCMSDIO */
+#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
+ if (!bcm_static_flowring && adapter) {
+ int i;
+ void *flowring_ptr = 0;
+ bcm_static_flowring =
+ (bcm_static_flowring_buf_t *)((char *)bcm_static_buf + 4096);
+ flowring_ptr = wifi_platform_prealloc(adapter, 10, 0);
+ if (!flowring_ptr) {
+ printk("%s: flowring_ptr is NULL\n", __FUNCTION__);
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ bcm_static_flowring = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
+
+ bcopy(flowring_ptr, bcm_static_flowring->buf_ptr,
+ sizeof(void *) * STATIC_BUF_FLOWRING_NUM);
+ for (i = 0; i < STATIC_BUF_FLOWRING_NUM; i++) {
+ bcm_static_flowring->buf_use[i] = 0;
+ }
+
+ spin_lock_init(&bcm_static_flowring->flowring_lock);
+ }
+#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
{
if (osh == NULL)
return;
+#ifdef BCM_SECURE_DMA
+ osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);
+ osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
+ osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
+ osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
+ osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK);
+#endif /* BCM_SECURE_DMA */
ASSERT(osh->magic == OS_HANDLE_MAGIC);
atomic_sub(1, &osh->cmn->refcount);
bcm_static_skb = 0;
}
#endif /* BCMSDIO */
+#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
+ if (bcm_static_flowring) {
+ bcm_static_flowring = 0;
+ }
+#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
}
bcm_static_skb = 0;
}
#endif /* BCMSDIO */
+#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
+ if (bcm_static_flowring) {
+ bcm_static_flowring = 0;
+ }
+#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
bb = b;
/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
* Increment a GMAC forwarder interface's pktalloced count.
*/
-#ifdef BCMDBG_CTRACE
void BCMFASTPATH
+#ifdef BCMDBG_CTRACE
osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
#else
-void BCMFASTPATH
osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
#endif /* BCMDBG_CTRACE */
{
* In the process, native packet is destroyed, there is no copying
* Also, a packettag is zeroed out
*/
-#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else
-void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
#endif /* BCMDBG_CTRACE */
{
}
/* Return a new packet. zero out pkttag */
-#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
-void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCMDBG_CTRACE */
{
#ifdef CTFPOOL
/* Allocate from local pool */
skb = osl_pktfastget(osh, len);
- if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
+ if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL))
#else /* CTFPOOL */
- if ((skb = osl_alloc_skb(osh, len))) {
+ if ((skb = osl_alloc_skb(osh, len)))
#endif /* CTFPOOL */
+ {
skb->tail += len;
skb->len += len;
skb->priority = 0;
int i = 0;
struct sk_buff *skb;
+ if (!bcm_static_skb)
+ return osl_pktget(osh, len);
+
if (len > DHD_SKB_MAX_BUFSIZE) {
printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
return osl_pktget(osh, len);
bcm_static_skb->pkt_use[i] = 1;
skb = bcm_static_skb->skb_4k[i];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
if (i != STATIC_PKT_MAX_NUM) {
bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
skb = bcm_static_skb->skb_8k[i];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
skb = bcm_static_skb->skb_16k;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
skb->len = len;
up(&bcm_static_skb->osl_pkt_sem);
return skb;
}
-#endif
+#endif /* ENHANCED_STATIC_BUF */
up(&bcm_static_skb->osl_pkt_sem);
printk("%s: all static pkt in use!\n", __FUNCTION__);
up(&bcm_static_skb->osl_pkt_sem);
osl_pktfree(osh, p, send);
}
+
+#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
+void*
+osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align_bits,
+ uint *alloced, dmaaddr_t *pap, uint16 idx)
+{
+ void *va = NULL;
+ uint16 align = (1 << align_bits);
+ uint16 flow_id = RINGID_TO_FLOWID(idx);
+ unsigned long flags;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+ size += align;
+
+ if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
+ printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
+ return osl_dma_alloc_consistent(osh, size, align_bits,
+ alloced, pap);
+ }
+
+ if (!bcm_static_flowring) {
+ printk("%s: bcm_static_flowring is not initialized\n",
+ __FUNCTION__);
+ return osl_dma_alloc_consistent(osh, size, align_bits,
+ alloced, pap);
+ }
+
+ if (size > STATIC_BUF_FLOWRING_SIZE) {
+ printk("%s: attempt to allocate huge packet, size=%d\n",
+ __FUNCTION__, size);
+ return osl_dma_alloc_consistent(osh, size, align_bits,
+ alloced, pap);
+ }
+
+ *alloced = size;
+
+ spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
+ if (bcm_static_flowring->buf_use[flow_id]) {
+ printk("%s: flowring %d is already alloced\n",
+ __FUNCTION__, flow_id);
+ spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
+ return NULL;
+ }
+
+ va = bcm_static_flowring->buf_ptr[flow_id];
+ if (va) {
+ *pap = (ulong)__virt_to_phys((ulong)va);
+ bcm_static_flowring->buf_use[flow_id] = 1;
+ }
+ spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
+
+ return va;
+}
+
+void
+osl_dma_free_consistent_static(osl_t *osh, void *va, uint size,
+ dmaaddr_t pa, uint16 idx)
+{
+ uint16 flow_id = RINGID_TO_FLOWID(idx);
+ unsigned long flags;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
+ printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
+ return osl_dma_free_consistent(osh, va, size, pa);
+ }
+
+ if (!bcm_static_flowring) {
+ printk("%s: bcm_static_flowring is not initialized\n",
+ __FUNCTION__);
+ return osl_dma_free_consistent(osh, va, size, pa);
+ }
+
+ spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
+ if (bcm_static_flowring->buf_use[flow_id]) {
+ bcm_static_flowring->buf_use[flow_id] = 0;
+ } else {
+ printk("%s: flowring %d is already freed\n",
+ __FUNCTION__, flow_id);
+ }
+ spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
+}
+#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
uint32
size += align;
*alloced = size;
+#ifndef BCM_SECURE_DMA
#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
if (va)
#else
{
dma_addr_t pap_lin;
- va = pci_alloc_consistent1(osh->pdev, size, &pap_lin);
+ struct pci_dev *hwdev = osh->pdev;
+#ifdef PCIE_TX_DEFERRAL
+ va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_KERNEL);
+#else
+ va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_ATOMIC);
+#endif
*pap = (dmaaddr_t)pap_lin;
}
#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+#else
+ va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
+#endif /* BCM_SECURE_DMA */
return va;
}
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
+#ifndef BCM_SECURE_DMA
+#if !defined(BCM47XX_CA9) || !defined(__ARM_ARCH_7A__)
+ struct pci_dev *hwdev = osh->pdev;
+#endif
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
kfree(va);
#else
- pci_free_consistent1(osh->pdev, size, va, (dma_addr_t)pa);
+ dma_free_coherent(&hwdev->dev, size, va, (dma_addr_t)pa);
#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+#else
+ osl_sec_dma_free_consistent(osh, va, size, pa);
+#endif /* BCM_SECURE_DMA */
}
dmaaddr_t BCMFASTPATH
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
+#ifndef BCM_SECURE_DMA
#ifdef BCM47XX_ACP_WAR
if (virt_to_phys(va) < ACP_WIN_LIMIT)
return;
#endif
if (size > 0)
dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+#else
+ phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, orig_pa, size, DMA_TX);
+#endif /* defined BCM_SECURE_DMA */
}
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
+#ifndef BCM_SECURE_DMA
#ifdef BCM47XX_ACP_WAR
if (virt_to_phys(va) < ACP_WIN_LIMIT)
return;
#endif
dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+#else
+ phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
+ dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX);
+#endif /* defined BCM_SECURE_DMA */
}
inline void osl_prefetch(const void *ptr)
/* Clone a packet.
* The pkttag contents are NOT cloned.
*/
-#ifdef BCMDBG_CTRACE
void *
+#ifdef BCMDBG_CTRACE
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
-void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCMDBG_CTRACE */
{
{
return (osh->flags & mask);
}
+#ifdef BCM_SECURE_DMA
+
+static void
+osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn)
+{
+ int ret;
+
+#if defined(__ARM_ARCH_7A__)
+ if (regn == CONT_ARMREGION) {
+ ret = osl_sec_dma_alloc_contig_mem(osh, memsize, regn);
+ if (ret != BCME_OK)
+ printk("linux_osl.c: CMA memory access failed\n");
+ }
+#endif
+ /* implement the MIPS Here */
+}
+
+static int
+osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn)
+{
+ u64 addr;
+
+ printk("linux_osl.c: The value of cma mem block size = %ld\n", memsize);
+ osh->cma = cma_dev_get_cma_dev(regn);
+ printk("The value of cma = %p\n", osh->cma);
+ if (!osh->cma) {
+ printk("linux_osl.c:contig_region index is invalid\n");
+ return BCME_ERROR;
+ }
+ if (cma_dev_get_mem(osh->cma, &addr, (u32)memsize, SEC_DMA_ALIGN) < 0) {
+ printk("linux_osl.c: contiguous memory block allocation failure\n");
+ return BCME_ERROR;
+ }
+ osh->contig_base_alloc = (phys_addr_t)addr;
+ osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
+ printk("contig base alloc=%lx \n", (ulong)osh->contig_base_alloc);
+
+ return BCME_OK;
+}
+
+static void
+osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn)
+{
+ int ret;
+
+ ret = cma_dev_put_mem(osh->cma, (u64)osh->contig_base, memsize);
+ if (ret)
+ printf("%s contig base free failed\n", __FUNCTION__);
+}
+
+static void *
+osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
+{
+
+ struct page **map;
+ int order, i;
+ void *addr = NULL;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
+
+ if (map == NULL)
+ return NULL;
+
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ map[i] = page + i;
+
+ if (iscache) {
+ addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
+ if (isdecr) {
+ osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
+ g_contig_delta_va_pa = osh->contig_delta_va_pa;
+ }
+ }
+ else {
+
+#if defined(__ARM_ARCH_7A__)
+ addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+ pgprot_noncached(__pgprot(PAGE_KERNEL)));
+#endif
+ if (isdecr) {
+ osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
+ g_contig_delta_va_pa = osh->contig_delta_va_pa;
+ }
+ }
+
+ kfree(map);
+ return (void *)addr;
+}
+
+static void
+osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
+{
+ vunmap(contig_base_va);
+}
+
+static void
+osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
+{
+ if (sec_list_base)
+ kfree(sec_list_base);
+}
+
+static void
+osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
+{
+ int i;
+ sec_mem_elem_t *sec_mem_elem;
+
+ if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
+
+ *list = sec_mem_elem;
+ bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
+ for (i = 0; i < max-1; i++) {
+ sec_mem_elem->next = (sec_mem_elem + 1);
+ sec_mem_elem->size = mbsize;
+ sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc;
+ sec_mem_elem->vac = osh->contig_base_alloc_va;
+
+ osh->contig_base_alloc += mbsize;
+ osh->contig_base_alloc_va += mbsize;
+
+ sec_mem_elem = sec_mem_elem + 1;
+ }
+ sec_mem_elem->next = NULL;
+ sec_mem_elem->size = mbsize;
+ sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc;
+ sec_mem_elem->vac = osh->contig_base_alloc_va;
+
+ osh->contig_base_alloc += mbsize;
+ osh->contig_base_alloc_va += mbsize;
+
+ }
+ else
+ printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
+}
+
+
+static sec_mem_elem_t * BCMFASTPATH
+osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
+ struct sec_cma_info *ptr_cma_info, uint offset)
+{
+ sec_mem_elem_t *sec_mem_elem = NULL;
+
+ if (size <= 512 && osh->sec_list_512) {
+ sec_mem_elem = osh->sec_list_512;
+ osh->sec_list_512 = sec_mem_elem->next;
+ }
+ else if (size <= 2048 && osh->sec_list_2048) {
+ sec_mem_elem = osh->sec_list_2048;
+ osh->sec_list_2048 = sec_mem_elem->next;
+ }
+ else if (osh->sec_list_4096) {
+ sec_mem_elem = osh->sec_list_4096;
+ osh->sec_list_4096 = sec_mem_elem->next;
+ } else {
+ printf("%s No matching Pool available size=%d \n", __FUNCTION__, size);
+ return NULL;
+ }
+
+ if (sec_mem_elem != NULL) {
+ sec_mem_elem->next = NULL;
+
+ if (ptr_cma_info->sec_alloc_list_tail) {
+ ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
+ }
+
+ ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
+ if (ptr_cma_info->sec_alloc_list == NULL)
+ ptr_cma_info->sec_alloc_list = sec_mem_elem;
+ }
+ return sec_mem_elem;
+}
+
+static void BCMFASTPATH
+osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
+{
+ sec_mem_elem->dma_handle = 0x0;
+ sec_mem_elem->va = NULL;
+
+ if (sec_mem_elem->size == 512) {
+ sec_mem_elem->next = osh->sec_list_512;
+ osh->sec_list_512 = sec_mem_elem;
+ }
+ else if (sec_mem_elem->size == 2048) {
+ sec_mem_elem->next = osh->sec_list_2048;
+ osh->sec_list_2048 = sec_mem_elem;
+ }
+ else if (sec_mem_elem->size == 4096) {
+ sec_mem_elem->next = osh->sec_list_4096;
+ osh->sec_list_4096 = sec_mem_elem;
+ }
+ else
+ printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size);
+}
+
+
+static sec_mem_elem_t * BCMFASTPATH
+osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
+{
+ sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
+ sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
+
+ if (sec_mem_elem->dma_handle == dma_handle) {
+
+ ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
+
+ if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
+ ptr_cma_info->sec_alloc_list_tail = NULL;
+ ASSERT(ptr_cma_info->sec_alloc_list == NULL);
+ }
+
+ return sec_mem_elem;
+ }
+
+ while (sec_mem_elem != NULL) {
+
+ if (sec_mem_elem->dma_handle == dma_handle) {
+
+ sec_prv_elem->next = sec_mem_elem->next;
+ if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
+ ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
+
+ return sec_mem_elem;
+ }
+ sec_prv_elem = sec_mem_elem;
+ sec_mem_elem = sec_mem_elem->next;
+ }
+ return NULL;
+}
+
+static sec_mem_elem_t *
+osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
+{
+ sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
+
+ if (sec_mem_elem) {
+
+ ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
+
+ if (ptr_cma_info->sec_alloc_list == NULL)
+ ptr_cma_info->sec_alloc_list_tail = NULL;
+
+ return sec_mem_elem;
+
+ } else
+ return NULL;
+}
+
+static void * BCMFASTPATH
+osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
+{
+ return ptr_cma_info->sec_alloc_list_tail;
+}
+
+dma_addr_t BCMFASTPATH
+osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah, void *ptr_cma_info)
+{
+ sec_mem_elem_t *sec_mem_elem;
+ struct page *pa_cma_page;
+ uint loffset;
+ void *vaorig = va + size;
+ dma_addr_t dma_handle = 0x0;
+ /* packet will be the one added with osl_sec_dma_map() just before this call */
+
+ sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
+
+ if (sec_mem_elem && sec_mem_elem->va == vaorig) {
+
+ pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
+ loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
+
+ dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size,
+ (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+
+ } else {
+ printf("%s: error orig va not found va = 0x%p \n",
+ __FUNCTION__, vaorig);
+ }
+ return dma_handle;
+}
+
+dma_addr_t BCMFASTPATH
+osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
+{
+
+ sec_mem_elem_t *sec_mem_elem;
+ struct page *pa_cma_page;
+ void *pa_cma_kmap_va = NULL;
+ int *fragva;
+ uint buflen = 0;
+ struct sk_buff *skb;
+ dma_addr_t dma_handle = 0x0;
+ uint loffset;
+ int i = 0;
+
+ sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
+
+ if (sec_mem_elem == NULL) {
+ printk("linux_osl.c: osl_sec_dma_map - cma allocation failed\n");
+ return 0;
+ }
+ sec_mem_elem->va = va;
+ sec_mem_elem->direction = direction;
+ pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
+
+ loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
+ /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
+ * pa_cma_kmap_va += loffset;
+ */
+
+ pa_cma_kmap_va = sec_mem_elem->vac;
+
+ if (direction == DMA_TX) {
+
+ if (p == NULL) {
+
+ memcpy(pa_cma_kmap_va+offset, va, size);
+ buflen = size;
+ }
+ else {
+ for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
+ if (skb_is_nonlinear(skb)) {
+
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ fragva = kmap_atomic(skb_frag_page(f));
+ memcpy((pa_cma_kmap_va+offset+buflen),
+ (fragva + f->page_offset), skb_frag_size(f));
+ kunmap_atomic(fragva);
+ buflen += skb_frag_size(f);
+ }
+ }
+ else {
+ memcpy((pa_cma_kmap_va+offset+buflen), skb->data, skb->len);
+ buflen += skb->len;
+ }
+ }
+
+ }
+ if (dmah) {
+ dmah->nsegs = 1;
+ dmah->origsize = buflen;
+ }
+ }
+
+ else if (direction == DMA_RX)
+ {
+ buflen = size;
+ if ((p != NULL) && (dmah != NULL)) {
+ dmah->nsegs = 1;
+ dmah->origsize = buflen;
+ }
+ }
+ if (direction == DMA_RX || direction == DMA_TX) {
+
+ dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset+offset, buflen,
+ (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+
+ }
+ if (dmah) {
+ dmah->segs[0].addr = dma_handle;
+ dmah->segs[0].length = buflen;
+ }
+ sec_mem_elem->dma_handle = dma_handle;
+ /* kunmap_atomic(pa_cma_kmap_va-loffset); */
+ return dma_handle;
+}
+
+dma_addr_t BCMFASTPATH
+osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
+{
+
+ struct page *pa_cma_page;
+ phys_addr_t pa_cma;
+ dma_addr_t dma_handle = 0x0;
+ uint loffset;
+
+ pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa);
+ pa_cma_page = phys_to_page(pa_cma);
+ loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
+
+ dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size,
+ (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+
+ return dma_handle;
+
+}
+
+void BCMFASTPATH
+osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
+void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
+{
+ sec_mem_elem_t *sec_mem_elem;
+ struct page *pa_cma_page;
+ void *pa_cma_kmap_va = NULL;
+ uint buflen = 0;
+ dma_addr_t pa_cma;
+ void *va;
+ uint loffset = 0;
+ int read_count = 0;
+ BCM_REFERENCE(buflen);
+ BCM_REFERENCE(read_count);
+
+ sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
+ if (sec_mem_elem == NULL) {
+ printf("%s sec_mem_elem is NULL and dma_handle =0x%lx and dir=%d\n",
+ __FUNCTION__, (ulong)dma_handle, direction);
+ return;
+ }
+
+ va = sec_mem_elem->va;
+ va -= offset;
+ pa_cma = sec_mem_elem->pa_cma;
+
+ pa_cma_page = phys_to_page(pa_cma);
+ loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
+
+ if (direction == DMA_RX) {
+
+ if (p == NULL) {
+
+ /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
+ * pa_cma_kmap_va += loffset;
+ */
+
+ pa_cma_kmap_va = sec_mem_elem->vac;
+
+ dma_unmap_page(osh->cma->dev, pa_cma, size, DMA_FROM_DEVICE);
+ memcpy(va, pa_cma_kmap_va, size);
+ /* kunmap_atomic(pa_cma_kmap_va); */
+ }
+ } else {
+ dma_unmap_page(osh->cma->dev, pa_cma, size+offset, DMA_TO_DEVICE);
+ }
+
+ osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
+}
+
+void
+osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
+{
+
+ sec_mem_elem_t *sec_mem_elem;
+
+ sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
+
+ while (sec_mem_elem != NULL) {
+
+ dma_unmap_page(osh->cma->dev, sec_mem_elem->pa_cma, sec_mem_elem->size,
+ sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
+
+ sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
+ }
+}
+
+static void
+osl_sec_dma_init_consistent(osl_t *osh)
+{
+ int i;
+ void *temp_va = osh->contig_base_alloc_coherent_va;
+ phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
+
+ for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+ osh->sec_cma_coherent[i].avail = TRUE;
+ osh->sec_cma_coherent[i].va = temp_va;
+ osh->sec_cma_coherent[i].pa = temp_pa;
+ temp_va += SEC_CMA_COHERENT_BLK;
+ temp_pa += SEC_CMA_COHERENT_BLK;
+ }
+}
+
+static void *
+osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
+{
+
+ void *temp_va = NULL;
+ ulong temp_pa = 0;
+ int i;
+
+ if (size > SEC_CMA_COHERENT_BLK) {
+ printf("%s unsupported size\n", __FUNCTION__);
+ return NULL;
+ }
+
+ for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+ if (osh->sec_cma_coherent[i].avail == TRUE) {
+ temp_va = osh->sec_cma_coherent[i].va;
+ temp_pa = osh->sec_cma_coherent[i].pa;
+ osh->sec_cma_coherent[i].avail = FALSE;
+ break;
+ }
+ }
+
+ if (i == SEC_CMA_COHERENT_MAX)
+ printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
+ temp_va, (ulong)temp_pa, size);
+
+ *pap = (unsigned long)temp_pa;
+ return temp_va;
+}
+
+static void
+osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+ int i = 0;
+
+ for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+ if (osh->sec_cma_coherent[i].va == va) {
+ osh->sec_cma_coherent[i].avail = TRUE;
+ break;
+ }
+ }
+ if (i == SEC_CMA_COHERENT_MAX)
+ printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
+ va, (ulong)pa, size);
+}
+
+#endif /* BCM_SECURE_DMA */
*/
-const char RKWIFI_DRV_VERSION[] = "5.40.WFD.OOB.64bit";
+const char RKWIFI_DRV_VERSION[] = "5.10.WFD.OOB.64bit";
const char WIFI_MODULE_NAME[] = "";
const char CONFIG_BCMDHD_FW_PATH[] = "";
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: siutils.c 481602 2014-05-29 22:43:34Z $
+ * $Id: siutils.c 497460 2014-08-19 15:14:13Z $
*/
#include <bcm_cfg.h>
#endif /* BCMLTECOEX */
+
/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;
SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
return NULL;
}
+#ifdef COSTOMER_HW4
+#ifdef CONFIG_MACH_UNIVERSAL5433
+ /* old revision check */
+ if (!check_rev()) {
+ /* abnormal link status */
+ if (!check_pcie_link_status()) {
+ printk("%s : PCIE LINK is abnormal status\n", __FUNCTION__);
+ return NULL;
+ }
+ }
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
+#endif
w = R_REG(osh, &cc->chipid);
if ((w & 0xfffff) == 148277) w -= 65532;
sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_android.c 490852 2014-07-12 15:20:53Z $
+ * $Id: wl_android.c 505064 2014-09-26 09:40:28Z $
*/
#include <linux/module.h>
#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
#define CMD_RSSI "RSSI"
#define CMD_LINKSPEED "LINKSPEED"
+#ifdef PKT_FILTER_SUPPORT
#define CMD_RXFILTER_START "RXFILTER-START"
#define CMD_RXFILTER_STOP "RXFILTER-STOP"
#define CMD_RXFILTER_ADD "RXFILTER-ADD"
#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#define CMD_PKT_FILTER_MODE "PKT_FILTER_MODE"
+#define CMD_PKT_FILTER_PORTS "PKT_FILTER_PORTS"
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+#endif /* PKT_FILTER_SUPPORT */
#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
#define CMD_BTCOEXMODE "BTCOEXMODE"
#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#define CMD_SETMIRACAST "SETMIRACAST"
+#define CMD_ASSOCRESPIE "ASSOCRESPIE"
+#define CMD_RXRATESTATS "RXRATESTATS"
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
#define CMD_KEEP_ALIVE "KEEPALIVE"
/* CCX Private Commands */
#endif /* WLAIBSS */
#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD"
+#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST"
+#define CMD_GET_LINK_STATUS "GETLINKSTATUS"
+
+#ifdef P2PRESP_WFDIE_SRC
+#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP"
+#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP"
+#endif /* P2PRESP_WFDIE_SRC */
+
+/* related with CMD_GET_LINK_STATUS */
+#define WL_ANDROID_LINK_VHT 0x01
+#define WL_ANDROID_LINK_MIMO 0x02
+#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04
+#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
#define MIRACAST_MCHAN_BW 25
#endif
+#ifdef CONNECTION_STATISTICS
+#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS"
+
+struct connection_stats {
+ u32 txframe;
+ u32 txbyte;
+ u32 txerror;
+ u32 rxframe;
+ u32 rxbyte;
+ u32 txfail;
+ u32 txretry;
+ u32 txretrie;
+ u32 txrts;
+ u32 txnocts;
+ u32 txexptime;
+ u32 txrate;
+ u8 chan_idle;
+};
+#endif /* CONNECTION_STATISTICS */
+
static LIST_HEAD(miracast_resume_list);
#ifdef WL_CFG80211
static u8 miracast_cur_mode;
int wl_android_wifi_on(struct net_device *dev)
{
int ret = 0;
+#ifdef CONFIG_MACH_UNIVERSAL5433
+ int retry;
+ /* Do not retry old revision Helsinki Prime */
+ if (!check_rev()) {
+ retry = 1;
+ } else {
+ retry = POWERUP_MAX_RETRY;
+ }
+#else
int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
if (!dev) {
ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
dhd_net_if_unlock(dev);
return ret;
-err:
#ifdef BCMSDIO
+err:
dhd_net_bus_devreset(dev, TRUE);
dhd_net_bus_suspend(dev);
-#endif
dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
printk("%s: Failed\n", __FUNCTION__);
dhd_net_if_unlock(dev);
-
return ret;
+#endif
}
int wl_android_wifi_off(struct net_device *dev)
return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
}
+#ifdef CONNECTION_STATISTICS
+static int
+wl_chanim_stats(struct net_device *dev, u8 *chan_idle)
+{
+ int err;
+ wl_chanim_stats_t *list;
+ /* Parameter _and_ returned buffer of chanim_stats. */
+ wl_chanim_stats_t param;
+ u8 result[WLC_IOCTL_SMLEN];
+ chanim_stats_t *stats;
+
+	memset(&param, 0, sizeof(param));
+ memset(result, 0, sizeof(result));
+
+ param.buflen = htod32(sizeof(wl_chanim_stats_t));
+ param.count = htod32(WL_CHANIM_COUNT_ONE);
+
+	if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)&param, sizeof(wl_chanim_stats_t),
+		(char*)result, sizeof(result), 0)) < 0) {
+ ANDROID_ERROR(("Failed to get chanim results %d \n", err));
+ return err;
+ }
+
+ list = (wl_chanim_stats_t*)result;
+
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ if (list->buflen == 0) {
+ list->version = 0;
+ list->count = 0;
+ } else if (list->version != WL_CHANIM_STATS_VERSION) {
+ ANDROID_ERROR(("Sorry, firmware has wl_chanim_stats version %d "
+ "but driver supports only version %d.\n",
+ list->version, WL_CHANIM_STATS_VERSION));
+ list->buflen = 0;
+ list->count = 0;
+ }
+
+ stats = list->stats;
+ stats->glitchcnt = dtoh32(stats->glitchcnt);
+ stats->badplcp = dtoh32(stats->badplcp);
+ stats->chanspec = dtoh16(stats->chanspec);
+ stats->timestamp = dtoh32(stats->timestamp);
+ stats->chan_idle = dtoh32(stats->chan_idle);
+
+ ANDROID_INFO(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n",
+ stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle,
+ stats->timestamp));
+
+ *chan_idle = stats->chan_idle;
+
+ return (err);
+}
+
+static int
+wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
+{
+ wl_cnt_t* cnt = NULL;
+ int link_speed = 0;
+ struct connection_stats *output;
+ unsigned int bufsize = 0;
+ int bytes_written = 0;
+ int ret = 0;
+
+ ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
+
+ if (total_len <= 0) {
+ ANDROID_ERROR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
+ goto error;
+ }
+
+ bufsize = total_len;
+ if (bufsize < sizeof(struct connection_stats)) {
+ ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%u\n",
+ __FUNCTION__, bufsize,
+ sizeof(struct connection_stats)));
+ goto error;
+ }
+
+ if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) {
+ ANDROID_ERROR(("kmalloc failed\n"));
+ return -1;
+ }
+ memset(cnt, 0, sizeof(*cnt));
+
+ ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, (char *)cnt, sizeof(wl_cnt_t), NULL);
+ if (ret) {
+ ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n",
+ __FUNCTION__, ret));
+ goto error;
+ }
+
+ if (dtoh16(cnt->version) > WL_CNT_T_VERSION) {
+ ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n",
+ __FUNCTION__, WL_CNT_T_VERSION, cnt->version));
+ goto error;
+ }
+
+ /* link_speed is in kbps */
+ ret = wldev_get_link_speed(dev, &link_speed);
+ if (ret || link_speed < 0) {
+ ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
+ __FUNCTION__, ret, link_speed));
+ goto error;
+ }
+
+ output = (struct connection_stats *)command;
+ output->txframe = dtoh32(cnt->txframe);
+ output->txbyte = dtoh32(cnt->txbyte);
+ output->txerror = dtoh32(cnt->txerror);
+ output->rxframe = dtoh32(cnt->rxframe);
+ output->rxbyte = dtoh32(cnt->rxbyte);
+ output->txfail = dtoh32(cnt->txfail);
+ output->txretry = dtoh32(cnt->txretry);
+ output->txretrie = dtoh32(cnt->txretrie);
+ output->txrts = dtoh32(cnt->txrts);
+ output->txnocts = dtoh32(cnt->txnocts);
+ output->txexptime = dtoh32(cnt->txexptime);
+ output->txrate = link_speed;
+
+ /* Channel idle ratio. */
+ if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+ output->chan_idle = 0;
+ };
+
+ kfree(cnt);
+
+ bytes_written = sizeof(struct connection_stats);
+ return bytes_written;
+
+error:
+ if (cnt) {
+ kfree(cnt);
+ }
+ return -1;
+}
+#endif /* CONNECTION_STATISTICS */
static int
wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
return res;
}
+
+static const char *
+get_string_by_separator(char *result, int result_len, const char *src, char separator)
+{
+ char *end = result + result_len - 1;
+ while ((result != end) && (*src != separator) && (*src)) {
+ *result++ = *src++;
+ }
+ *result = 0;
+ if (*src == separator)
+ ++src;
+ return src;
+}
+
+int
+wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd)
+{
+ char sbuf[32];
+ int i, cnt, size, err, ioctl_buf_len;
+ roamoffl_bssid_list_t *bssid_list;
+ const char *str = cmd;
+ char *ioctl_buf;
+
+ str = get_string_by_separator(sbuf, 32, str, ',');
+ cnt = bcm_atoi(sbuf);
+ cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM);
+ size = sizeof(int) + sizeof(struct ether_addr) * cnt;
+ ANDROID_ERROR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size));
+ bssid_list = kmalloc(size, GFP_KERNEL);
+ if (bssid_list == NULL) {
+ ANDROID_ERROR(("%s: memory alloc for bssid list(%d) failed\n",
+ __FUNCTION__, size));
+ return -ENOMEM;
+ }
+ ioctl_buf_len = size + 64;
+ ioctl_buf = kmalloc(ioctl_buf_len, GFP_KERNEL);
+ if (ioctl_buf == NULL) {
+ ANDROID_ERROR(("%s: memory alloc for ioctl_buf(%d) failed\n",
+ __FUNCTION__, ioctl_buf_len));
+ kfree(bssid_list);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ str = get_string_by_separator(sbuf, 32, str, ',');
+ if (bcm_ether_atoe(sbuf, &bssid_list->bssid[i]) == 0) {
+ ANDROID_ERROR(("%s: Invalid station MAC Address!!!\n", __FUNCTION__));
+ kfree(bssid_list);
+ kfree(ioctl_buf);
+ return -1;
+ }
+ }
+
+ bssid_list->cnt = cnt;
+ err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list",
+ bssid_list, size, ioctl_buf, ioctl_buf_len, NULL);
+ kfree(bssid_list);
+ kfree(ioctl_buf);
+
+ return err;
+}
+
+#ifdef P2PRESP_WFDIE_SRC
+static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int only_resp_wfdsrc = 0;
+
+ error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n",
+ __FUNCTION__, error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc);
+
+ return bytes_written;
+}
+
+static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc)
+{
+ int error = 0;
+
+ error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n",
+ __FUNCTION__, only_resp_wfdsrc, error));
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* P2PRESP_WFDIE_SRC */
+
+static int wl_android_get_link_status(struct net_device *dev, char *command,
+ int total_len)
+{
+ int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map;
+ uint32 rspec;
+ uint encode, rate, txexp;
+ struct wl_bss_info *bi;
+ int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
+ char buf[datalen];
+
+ /* get BSS information */
+ *(u32 *) buf = htod32(datalen);
+ error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void *)buf, datalen, false);
+ if (unlikely(error)) {
+ ANDROID_ERROR(("Could not get bss info %d\n", error));
+ return -1;
+ }
+
+ bi = (struct wl_bss_info *) (buf + sizeof(uint32));
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ if (bi->BSSID.octet[i] > 0) {
+ break;
+ }
+ }
+
+ if (i == ETHER_ADDR_LEN) {
+ ANDROID_TRACE(("No BSSID\n"));
+ return -1;
+ }
+
+ /* check VHT capability at beacon */
+ if (bi->vht_cap) {
+ if (CHSPEC_IS5G(bi->chanspec)) {
+ result |= WL_ANDROID_LINK_AP_VHT_SUPPORT;
+ }
+ }
+
+ /* get a rspec (radio spectrum) rate */
+ error = wldev_iovar_getint(dev, "nrate", &rspec);
+ if (unlikely(error) || rspec == 0) {
+ ANDROID_ERROR(("get link status error (%d)\n", error));
+ return -1;
+ }
+
+ encode = (rspec & WL_RSPEC_ENCODING_MASK);
+ rate = (rspec & WL_RSPEC_RATE_MASK);
+ txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT;
+
+ switch (encode) {
+ case WL_RSPEC_ENCODE_HT:
+ /* check Rx MCS Map for HT */
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = 0xFF;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = 0x7F;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
+ break;
+ case WL_RSPEC_ENCODE_VHT:
+ /* check Rx MCS Map for VHT */
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ break;
+ }
+
+ /* check MIMO capability with nss in beacon */
+ if (nss > 1) {
+ result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT;
+ }
+
+ single_stream = (encode == WL_RSPEC_ENCODE_RATE) ||
+ ((encode == WL_RSPEC_ENCODE_HT) && rate < 8) ||
+ ((encode == WL_RSPEC_ENCODE_VHT) &&
+ ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1);
+
+ if (txexp == 0) {
+ if ((rspec & WL_RSPEC_STBC) && single_stream) {
+ stf = OLD_NRATE_STF_STBC;
+ } else {
+ stf = (single_stream) ? OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM;
+ }
+ } else if (txexp == 1 && single_stream) {
+ stf = OLD_NRATE_STF_CDD;
+ }
+
+ /* check 11ac (VHT) */
+ if (encode == WL_RSPEC_ENCODE_VHT) {
+ if (CHSPEC_IS5G(bi->chanspec)) {
+ result |= WL_ANDROID_LINK_VHT;
+ }
+ }
+
+ /* check MIMO */
+ if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) {
+ switch (stf) {
+ case OLD_NRATE_STF_SISO:
+ break;
+ case OLD_NRATE_STF_CDD:
+ case OLD_NRATE_STF_STBC:
+ result |= WL_ANDROID_LINK_MIMO;
+ break;
+ case OLD_NRATE_STF_SDM:
+ if (!single_stream) {
+ result |= WL_ANDROID_LINK_MIMO;
+ }
+ break;
+ }
+ }
+
+ ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n",
+ __FUNCTION__, result, stf, single_stream, nss));
+
+ bytes_written = sprintf(command, "%s %d", CMD_GET_LINK_STATUS, result);
+
+ return bytes_written;
+}
+
int
wl_android_get_channel(
struct net_device *dev, char* command, int total_len)
int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
}
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+ else if (strnicmp(command, CMD_PKT_FILTER_MODE, strlen(CMD_PKT_FILTER_MODE)) == 0) {
+ dhd_set_packet_filter_mode(net, &command[strlen(CMD_PKT_FILTER_MODE) + 1]);
+ } else if (strnicmp(command, CMD_PKT_FILTER_PORTS, strlen(CMD_PKT_FILTER_PORTS)) == 0) {
+ bytes_written = dhd_set_packet_filter_ports(net,
+ &command[strlen(CMD_PKT_FILTER_PORTS) + 1]);
+ ret = bytes_written;
+ }
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
#endif /* PKT_FILTER_SUPPORT */
else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
/* TBD: BTCOEXSCAN-START */
}
#ifdef WL_CFG80211
/* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */
- //else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
- // char *country_code = command + strlen(CMD_COUNTRY) + 1;
- // bytes_written = wldev_set_country(net, country_code, true, true);
- //}
+ else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ char *country_code = command + strlen(CMD_COUNTRY) + 1;
+#ifdef CUSTOMER_HW5
+ /* Customer_hw5 want to keep connections */
+ bytes_written = wldev_set_country(net, country_code, true, false);
+#else
+ bytes_written = wldev_set_country(net, country_code, true, true);
+#endif
+ }
#endif /* WL_CFG80211 */
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+ else if (strnicmp(command, CMD_SETMIRACAST, strlen(CMD_SETMIRACAST)) == 0)
+ bytes_written = wldev_miracast_tuning(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_ASSOCRESPIE, strlen(CMD_ASSOCRESPIE)) == 0)
+ bytes_written = wldev_get_assoc_resp_ie(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_RXRATESTATS, strlen(CMD_RXRATESTATS)) == 0)
+ bytes_written = wldev_get_rx_rate_stats(net, command, priv_cmd.total_len);
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
bytes_written = wl_android_set_ibss_beacon_ouidata(net,
command, priv_cmd.total_len);
int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
}
+ else if (strnicmp(command, CMD_ROAM_OFFLOAD_APLIST, strlen(CMD_ROAM_OFFLOAD_APLIST)) == 0) {
+ bytes_written = wl_android_set_roam_offload_bssid_list(net,
+ command + strlen(CMD_ROAM_OFFLOAD_APLIST) + 1);
+ }
+#endif
+#ifdef P2PRESP_WFDIE_SRC
+ else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP,
+ strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) {
+ int mode = *(command + strlen(CMD_P2P_SET_WFDIE_RESP) + 1) - '0';
+ bytes_written = wl_android_set_wfdie_resp(net, mode);
+ } else if (strnicmp(command, CMD_P2P_GET_WFDIE_RESP,
+ strlen(CMD_P2P_GET_WFDIE_RESP)) == 0) {
+ bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len);
+ }
+#endif /* P2PRESP_WFDIE_SRC */
+ else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
+ bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
+ }
+#ifdef CONNECTION_STATISTICS
+ else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+ strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+ bytes_written = wl_android_get_connection_stats(net, command,
+ priv_cmd.total_len);
+ }
#endif
else if(strnicmp(command, CMD_GET_CHANNEL, strlen(CMD_GET_CHANNEL)) == 0) {
bytes_written = wl_android_get_channel(net, command, priv_cmd.total_len);
void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, wl_scan_results_t *ss_list);
void wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
#endif
-#endif /* _wl_android_ */
\ No newline at end of file
+#endif /* _wl_android_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfg80211.c 491569 2014-07-16 21:28:40Z $
+ * $Id: wl_cfg80211.c 506036 2014-10-02 11:33:14Z $
*/
/* */
#include <typedefs.h>
static struct bcm_cfg80211 *g_bcm_cfg = NULL;
u32 wl_dbg_level = WL_DBG_ERR;
-#define MAX_WAIT_TIME 1500
#ifdef WLAIBSS_MCHAN
#define IBSS_IF_NAME "ibss%d"
#endif /* WLAIBSS_MCHAN */
#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \
(e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE))
-#define DNGL_FUNC(func, parameters) func parameters
#define COEX_DHCP
#define WLAN_EID_SSID 0
#define WPS_CONFIG_VIRT_DISPLAY 0x2008
#define WPS_CONFIG_PHY_DISPLAY 0x4008
-#define PM_BLOCK 1
-#define PM_ENABLE 0
-
#ifdef BCMCCX
#ifndef WLAN_AKM_SUITE_CCKM
#define WLAN_AKM_SUITE_CCKM 0x00409600
#ifdef MFP
#define WL_AKM_SUITE_MFP_1X 0x000FAC05
#define WL_AKM_SUITE_MFP_PSK 0x000FAC06
+#define WL_MFP_CAPABLE 0x1
+#define WL_MFP_REQUIRED 0x2
#endif /* MFP */
#ifndef IBSS_COALESCE_ALLOWED
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+#ifdef P2PONEINT
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+#else
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+#endif
static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
struct net_device *ndev, bool aborted, bool fw_abort);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len);
+#else
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
+ size_t len);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
-static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+#ifdef P2PONEINT
+chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+#else
+chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+#endif
s32 wl_cfg80211_channel_to_freq(u32 channel);
#if defined(DHCP_SCAN_SUPPRESS)
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
+extern int passive_channel_skip;
+
#if (WL_DBG_LEVEL > 0)
#define WL_DBG_ESTR_MAX 50
static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
return err;
}
-static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+chanspec_t
+#ifdef P2PONEINT
+wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+#else
+wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+#endif
{
chanspec_t chspec;
int err = 0;
bool enabled;
#endif
#endif /* PROP_TXSTATUS_VSDB */
+#if defined(SUPPORT_AP_POWERSAVE)
+ dhd_pub_t *dhd;
+#endif
if (!cfg)
return ERR_PTR(-EINVAL);
dhd = (dhd_pub_t *)(cfg->pub);
#endif
#endif /* PROP_TXSTATUS_VSDB */
-
+#if defined(SUPPORT_AP_POWERSAVE)
+ dhd = (dhd_pub_t *)(cfg->pub);
+#endif
/* Use primary I/F for sending cmds down to firmware */
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
"created net attach done\n", cfg->p2p->vir_ifname));
if (mode == WL_MODE_AP)
wl_set_drv_status(cfg, CONNECTED, new_ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+ if (mode == WL_MODE_AP) {
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+ }
+#endif
if (type == NL80211_IFTYPE_P2P_CLIENT)
dhd_mode = DHD_FLAG_P2P_GC_MODE;
else if (type == NL80211_IFTYPE_P2P_GO)
} else {
wl_clr_p2p_status(cfg, IF_ADDING);
WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+
+ WL_ERR(("left timeout : %d\n", timeout));
+ WL_ERR(("IF_ADDING status : %d\n", wl_get_p2p_status(cfg, IF_ADDING)));
+ WL_ERR(("event valid : %d\n", cfg->if_event_info.valid));
+
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ wl_set_p2p_status(cfg, IF_DELETING);
+
+ err = wl_cfgp2p_ifdel(cfg, &cfg->p2p->int_addr);
+ if (err == BCME_OK) {
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+ WL_ERR(("IFDEL operation done\n"));
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ err = BCME_ERROR;
+ }
+ }
+ if (err != BCME_OK) {
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+ err, ndev->name));
+ net_os_send_hang_message(ndev);
+ }
+
memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
cfg->p2p->vif_created = false;
#ifdef PROP_TXSTATUS_VSDB
wl_clr_p2p_status(cfg, IF_CHANGED);
if (mode == WL_MODE_AP)
wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif
} else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
wl_set_drv_status(cfg, AP_CREATING, ndev);
}
} else {
WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA"));
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, FALSE);
+#endif
+#ifdef P2PONEINT
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ if (cfg->p2p_supported && cfg->p2p->vif_created) {
+ WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
+ p2p_on(cfg)));
+ wldev_iovar_setint(ndev, "mpc", 0);
+ wl_notify_escan_complete(cfg, ndev, true, true);
+
+ /* In concurrency case, STA may be already associated in a particular
+ * channel. so retrieve the current channel of primary interface and
+ * then start the virtual interface on that.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+ wlif_type = WL_P2P_IF_CLIENT;
+ WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d) chspec 0x%x \n",
+ ndev->name, ap, infra, type, chspec));
+ wl_set_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+ wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+ wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_CHANGED) == true),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ dhd->op_mode |= DHD_FLAG_P2P_GC_MODE;
+ dhd->op_mode &= ~DHD_FLAG_P2P_GO_MODE;
+ wl_clr_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+ do { \
+ memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ } while (0);
+
+ INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
+ }
+#endif /* P2PONEINT */
}
if (ibss) {
{
s32 err = BCME_OK;
s32 passive_scan;
+ s32 passive_scan_time;
+ s32 passive_scan_time_org;
wl_scan_results_t *results;
WL_SCAN(("Enter \n"));
mutex_lock(&cfg->usr_sync);
goto exit;
}
+ if (passive_channel_skip) {
+
+ err = wldev_ioctl(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org), false);
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+
+ passive_scan_time = 0;
+ err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time, sizeof(passive_scan_time), true);
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n",
+ passive_channel_skip));
+ }
+
err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+
+ if (passive_channel_skip) {
+ err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org), true);
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+ passive_scan_time_org));
+ }
+
exit:
mutex_unlock(&cfg->usr_sync);
return err;
ssids = this_ssid;
}
+ if (request && !p2p_scan(cfg)) {
+ WL_TRACE_HW4(("START SCAN\n"));
+ }
+
cfg->scan_request = request;
wl_set_drv_status(cfg, SCANNING, ndev);
WL_DBG(("Enter \n"));
RETURN_EIO_IF_NOT_UP(cfg);
+#ifdef P2PONEINT
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("scan use [dev name %s ] \n", ndev->name));
+#endif
+
err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
if (unlikely(err)) {
if ((err == BCME_EPERM) && cfg->scan_suppressed)
}
#endif /* WLAIBSS_MCHAN */
+/*
+ * Create or remove a firmware bsscfg via the "interface_create" /
+ * "interface_remove" iovars (newer-firmware replacement for
+ * wl_cfg80211_add_del_bss; callers fall back to that path when this
+ * returns BCME_UNSUPPORTED).
+ *
+ * cfg        - driver private data (supplies ioctl_buf / ioctl_buf_sync)
+ * ndev       - net_device the iovar is issued on
+ * bsscfg_idx - NOTE(review): unused here; firmware chooses the bsscfg
+ *              index and returns it - confirm callers expect that
+ * iface_type - NL80211_IFTYPE_AP selects an AP bsscfg, anything else STA
+ * del        - non-zero: remove the interface; zero: create it
+ * addr       - optional MAC address for the new interface (may be NULL)
+ *
+ * Returns the firmware-assigned bsscfg index on successful create, the
+ * iovar result on remove, or a negative error code on failure.
+ */
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	wl_interface_create_t iface;
+	s32 ret;
+	wl_interface_info_t *info;
+
+	bzero(&iface, sizeof(wl_interface_create_t));
+
+	iface.ver = WL_INTERFACE_CREATE_VER;
+
+	/* only AP vs. STA is distinguished; other iftypes map to STA */
+	if (iface_type == NL80211_IFTYPE_AP)
+		iface.flags = WL_INTERFACE_CREATE_AP;
+	else
+		iface.flags = WL_INTERFACE_CREATE_STA;
+
+	if (del) {
+		ret = wldev_iovar_setbuf(ndev, "interface_remove",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+	} else {
+		/* ask firmware to use the supplied MAC, if any */
+		if (addr) {
+			memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+			iface.flags |= WL_INTERFACE_MAC_USE;
+		}
+		ret = wldev_iovar_getbuf(ndev, "interface_create",
+			&iface, sizeof(wl_interface_create_t),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret == 0) {
+			/* success: firmware reports the new bsscfg index */
+			info = (wl_interface_info_t *)cfg->ioctl_buf;
+			WL_DBG(("wl interface create success!! bssidx:%d \n",
+				info->bsscfgidx));
+			ret = info->bsscfgidx;
+		}
+	}
+
+	if (ret < 0)
+		WL_ERR(("Interface %s failed!! ret %d\n",
+			del ? "remove" : "create", ret));
+
+	return ret;
+}
+
#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
s32
wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
/*
* Intialize the firmware I/F.
*/
- if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
- bsscfg_idx, iface_type, 0, addr)) < 0) {
- return NULL;
+ ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
+ NL80211_IFTYPE_STATION, 0, addr);
+ if (ret == BCME_UNSUPPORTED) {
+ /* Use bssidx 1 by default */
+ if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+ bsscfg_idx, iface_type, 0, addr)) < 0) {
+ return NULL;
+ }
+ } else if (ret < 0) {
+ WL_ERR(("Interface create failed!! ret:%d \n", ret));
+ goto fail;
+ } else {
+ /* Success */
+ bsscfg_idx = ret;
}
/*
s32 ret = BCME_OK;
s32 bsscfg_idx = 1;
u32 timeout;
+ u32 ifidx;
enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
WL_DBG(("Enter\n"));
memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
/* Delete the firmware interface */
- if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
- bsscfg_idx, iface_type, true, NULL)) < 0) {
- WL_ERR(("DEL bss failed ret:%d \n", ret));
- return ret;
+ ret = wl_cfg80211_interface_ops(cfg, ndev, cfg->cfgdev_bssidx,
+ NL80211_IFTYPE_STATION, 1, NULL);
+ if (ret == BCME_UNSUPPORTED) {
+ if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+ bsscfg_idx, iface_type, true, NULL)) < 0) {
+ WL_ERR(("DEL bss failed ret:%d \n", ret));
+ return ret;
+ }
+ } else if (ret < 0) {
+ WL_ERR(("Interface DEL failed ret:%d \n", ret));
+ return ret;
}
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
if (timeout <= 0 || cfg->bss_pending_op) {
WL_ERR(("timeout in waiting IF_DEL event\n"));
}
-
- wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+ ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
+ wl_cfg80211_remove_if(cfg, ifidx, ndev);
cfg->bss_cfgdev = NULL;
cfg->cfgdev_bssidx = -1;
cfg->bss_pending_op = FALSE;
wsec_val |= MFP_CAPABLE;
if (rsn_cap[0] & RSN_CAP_MFPR)
wsec_val |= MFP_REQUIRED;
+
+ if (rsn_cap[0] & RSN_CAP_MFPR)
+ mfp = WL_MFP_REQUIRED;
+ else
+ mfp = WL_MFP_CAPABLE;
+ err = wldev_iovar_setint_bsscfg(dev, "mfp",
+ mfp, bssidx);
}
}
}
wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
- wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+ err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("wpaie set error (%d)\n", err));
+ return err;
+ }
} else {
- wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+ err = wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("wpaie set error (%d)\n", err));
+ return err;
+ }
}
if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
RETURN_EIO_IF_NOT_UP(cfg);
act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+#ifdef ESCAN_RESULT_PATCH
+ if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid &&
+ (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) {
+ WL_ERR(("Disconnecting from connecting device: " MACDBG "\n",
+ MAC2STRDBG(curbssid)));
+ act = true;
+ }
+#endif /* ESCAN_RESULT_PATCH */
if (act) {
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
}
int
-wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable)
+wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable)
{
int err;
wl_eventmsg_buf_t ev_buf;
/* roam offload is only for the primary device */
return -1;
}
- err = wldev_iovar_setint(dev, "roam_offload", (int)enable);
+ err = wldev_iovar_setint(dev, "roam_offload", enable);
if (err)
return err;
wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *dev, u8 key_idx)
{
+#ifdef MFP
+ return 0;
+#else
WL_INFORM(("Not supported\n"));
return -EOPNOTSUPP;
+#endif /* MFP */
}
static s32
#endif
#if defined(RSSIOFFSET)
rssi = wl_update_rssi_offset(dev, rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(rssi, RSSI_MAXVAL);
#endif
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = rssi;
{
s32 err = 0;
+#ifdef P2PLISTEN_AP_SAMECHN
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_device *dev;
+#endif /* P2PLISTEN_AP_SAMECHN */
+
#if defined(WL_CFG80211_P2P_DEV_IF)
if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
WL_DBG((" enter ) on P2P dedicated discover interface\n"));
#else
WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex));
#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#ifdef P2PLISTEN_AP_SAMECHN
+ if (cfg && cfg->p2p_resp_apchn_status) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+ wl_cfg80211_set_p2p_resp_ap_chn(dev, 0);
+ cfg->p2p_resp_apchn_status = false;
+ WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
return err;
}
dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ if (!dev) {
+ WL_ERR(("dev is NULL\n"));
+ return -EINVAL;
+ }
+
/* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE) */
if (discover_cfgdev(cfgdev, cfg)) {
+ if (!cfg->p2p_supported || !cfg->p2p) {
+ WL_ERR(("P2P doesn't setup completed yet\n"));
+ return -EINVAL;
+ }
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
}
else {
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
s32 ie_len = len - ie_offset;
+#ifdef P2PONEINT
+ if (dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
}
static s32
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def chandef)
+#else
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
+#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
{
s32 _chan;
chanspec_t chspec = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
#endif /* CUSTOM_SET_CPUCORE */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+ enum nl80211_channel_type channel_type = NL80211_CHAN_HT20;
+#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
+
+#ifndef P2PONEINT
dev = ndev_to_wlc_ndev(dev, cfg);
+#endif
_chan = ieee80211_frequency_to_channel(chan->center_freq);
printk("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
dev->ifindex, channel_type, _chan);
+#ifdef CUSTOM_PLATFORM_NV_TEGRA
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_COMPAT_WIRELESS))
+ WL_ERR(("chan_width = %d\n", chandef.width));
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = WL_CHANSPEC_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = WL_CHANSPEC_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = WL_CHANSPEC_BW_8080;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = WL_CHANSPEC_BW_160;
+ break;
+ default:
+ bw = WL_CHANSPEC_BW_20;
+ break;
+ }
+ goto set_channel;
+#endif /* ((LINUX_VERSION >= VERSION(3, 8, 0) && !WL_COMPAT_WIRELESS) */
+#endif /* CUSTOM_PLATFORM_NV_TEGRA */
if (chan->band == IEEE80211_BAND_5GHZ) {
param.band = WLC_BAND_5G;
wpa_suite_mcast_t *mcast;
wpa_suite_ucast_t *ucast;
wpa_suite_auth_key_mgmt_t *mgmt;
+ wpa_pmkid_list_t *pmkid;
+ int cnt = 0;
+#ifdef MFP
+ int mfp = 0;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+#endif /* MFP */
u16 suite_count;
u8 rsn_cap[2];
goto exit;
WL_DBG(("Enter \n"));
- len = wpa2ie->len;
+ len = wpa2ie->len - WPA2_VERSION_LEN;
/* check the mcast cipher */
mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
switch (mcast->type) {
wsec = (pval | gval | SES_OW_ENABLED);
/* check the AKM */
mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
- suite_count = ltoh16_ua(&mgmt->count);
- switch (mgmt->list[0].type) {
- case RSN_AKM_NONE:
- wpa_auth = WPA_AUTH_NONE;
- break;
- case RSN_AKM_UNSPECIFIED:
- wpa_auth = WPA2_AUTH_UNSPECIFIED;
- break;
- case RSN_AKM_PSK:
- wpa_auth = WPA2_AUTH_PSK;
- break;
- default:
- WL_ERR(("No Key Mgmt Info\n"));
+ suite_count = cnt = ltoh16_ua(&mgmt->count);
+ while (cnt--) {
+ switch (mgmt->list[cnt].type) {
+ case RSN_AKM_NONE:
+ wpa_auth = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ wpa_auth = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth = WPA2_AUTH_PSK;
+ break;
+#ifdef MFP
+ case RSN_AKM_MFP_PSK:
+ wpa_auth |= WPA2_AUTH_PSK;
+ wsec |= MFP_SHA256;
+ break;
+ case RSN_AKM_MFP_1X:
+ wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+ wsec |= MFP_SHA256;
+ break;
+#endif /* MFP */
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
}
if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
wme_bss_disable = 1;
}
+#ifdef MFP
+ if (rsn_cap[0] & RSN_CAP_MFPR) {
+ WL_DBG(("MFP Required \n"));
+ mfp = WL_MFP_REQUIRED;
+ } else if (rsn_cap[0] & RSN_CAP_MFPC) {
+ WL_DBG(("MFP Capable \n"));
+ mfp = WL_MFP_CAPABLE;
+ }
+#endif /* MFP */
+
/* set wme_bss_disable to sync RSN Capabilities */
err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
if (err < 0) {
WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
}
+ if ((len -= RSN_CAP_LEN) >= WPA2_PMKID_COUNT_LEN) {
+ pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
+ cnt = ltoh16_ua(&pmkid->count);
+ if (cnt != 0) {
+ WL_ERR(("AP has non-zero PMKID count. Wrong!\n"));
+ return BCME_ERROR;
+ }
+ /* since PMKID cnt is known to be 0 for AP, */
+ /* so don't bother to send down this info to firmware */
+ }
+
+#ifdef MFP
+ if ((len -= WPA2_PMKID_COUNT_LEN) >= RSN_GROUPMANAGE_CIPHER_LEN) {
+ err = wldev_iovar_setbuf_bsscfg(dev, "bip",
+ (void *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN),
+ RSN_GROUPMANAGE_CIPHER_LEN,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("bip set error %d\n", err));
+ return BCME_ERROR;
+ }
+ }
+#endif
+
/* set auth */
err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
if (err < 0) {
WL_ERR(("wsec error %d\n", err));
return BCME_ERROR;
}
+
+#ifdef MFP
+ if (mfp) {
+ /* This needs to go after wsec otherwise the wsec command will
+ * overwrite the values set by MFP
+ */
+ if ((err = wldev_iovar_setint_bsscfg(dev, "mfp", mfp, bssidx)) < 0) {
+ WL_ERR(("MFP Setting failed. ret = %d \n", err));
+ return err;
+ }
+ }
+#endif /* MFP */
+
/* set upper-layer auth */
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
if (err < 0) {
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
WL_DBG(("Start AP req on P2P iface: GO\n"));
+#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
if ((err = wl_cfg80211_set_channel(wiphy, dev,
dev->ieee80211_ptr->preset_chandef.chan,
- NL80211_CHAN_HT20) < 0)) {
+ dev->ieee80211_ptr->preset_chandef) < 0)) {
WL_ERR(("Set channel failed \n"));
goto fail;
}
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
+#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
+#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
+#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+ .tdls_mgmt = wl_cfg80211_tdls_mgmt,
.tdls_oper = wl_cfg80211_tdls_oper,
#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
#ifdef WL_SUPPORT_ACS
#endif
#if defined(RSSIOFFSET)
notif_bss_info->rssi = wl_update_rssi_offset(bcmcfg_to_prmry_ndev(cfg), notif_bss_info->rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ notif_bss_info->rssi = MIN(notif_bss_info->rssi, RSSI_MAXVAL);
#endif
memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
mgmt_type = cfg->active_scan ?
if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
} else if (event == WLC_E_DISASSOC_IND) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
u16 flags = ntoh16(e->flags);
u32 status = ntoh32(e->status);
bool active;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ struct ieee80211_channel *channel = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 chanspec, chan;
+ u32 freq, band;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
if (event == WLC_E_JOIN) {
WL_DBG(("joined in IBSS network\n"));
}
if (event == WLC_E_JOIN || event == WLC_E_START ||
(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get chanspec %d\n", err));
+ return err;
+ }
+ chan = wf_chspec_ctlchan(wl_chspec_driver_to_host(chanspec));
+ band = (chan <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(chan, band);
+ channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
/* ROAM or Redundant */
u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL);
+#else
cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+#endif
}
else {
/* New connection */
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL);
+#else
cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+#endif
wl_set_drv_status(cfg, CONNECTED, ndev);
active = true;
wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
} else if (wl_is_linkdown(cfg, e)) {
+#ifdef P2PLISTEN_AP_SAMECHN
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0);
+ cfg->p2p_resp_apchn_status = false;
+ WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+
if (cfg->scan_request)
wl_notify_escan_complete(cfg, ndev, true, true);
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
s32 err = 0;
struct wiphy *wiphy;
u32 channel;
+ struct ieee80211_channel *cur_channel;
+ u32 freq, band;
wiphy = bcmcfg_to_wiphy(cfg);
ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- bss = cfg80211_get_bss(wiphy, NULL, curbssid,
- ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
mutex_lock(&cfg->usr_sync);
bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(channel, band);
+#endif
+ cur_channel = ieee80211_get_channel(wiphy, freq);
+
+ bss = cfg80211_get_bss(wiphy, cur_channel, curbssid,
+ ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
if (!bss) {
WL_DBG(("Could not find the AP\n"));
memset(&bssid, 0, ETHER_ADDR_LEN);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef P2PONEINT
+ WL_DBG((" device name is ndev %s \n", ndev->name));
+#endif
if (channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
}
}
(void) sd_act_frm;
+#ifdef WLTDLS
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
+ WL_DBG((" TDLS Action Frame Received type = %d \n",
+ mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
+
+ if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
+ cfg->tdls_mgmt_frame = mgmt_frame;
+ cfg->tdls_mgmt_frame_len = mgmt_frame_len;
+ cfg->tdls_mgmt_freq = freq;
+ return 0;
+ }
+
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_VENDOR_SPECIFIC) {
+ WL_DBG((" TDLS Vendor Specific Received type \n"));
+#endif
} else {
if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
}
}
+#ifdef P2PONEINT
+ if (ndev == cfg->p2p_net && ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ cfgdev = ndev_to_cfgdev(ndev);
+ }
+ WL_DBG((" device name is ndev %s \n", ndev->name));
+#endif
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
kfree(cfg->ap_info);
cfg->ap_info = NULL;
}
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ }
+#endif /* WLTDLS */
}
static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
*/
static bool wl_cfg80211_netdev_notifier_registered = FALSE;
-static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+void
+#ifdef P2PONEINT
+wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+#else
+wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+#endif
{
wl_scan_params_t *params = NULL;
s32 params_size = 0;
return err;
}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+/*
+ * Maintain a descending-RSSI list of up to BUF_OVERFLOW_MGMT_COUNT scan
+ * entries that are candidates for eviction when the escan result buffer
+ * overflows.  Inserts @bss at its RSSI-sorted slot in @candidate,
+ * shifting weaker entries down; the weakest entry falls off the end.
+ * Does nothing if @bss is weaker than every current candidate.
+ */
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+	int idx;
+	for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+		/* number of entries that must shift down to open slot idx */
+		int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+		if (bss->RSSI < candidate[idx].RSSI) {
+			if (len)
+				/* memmove, not memcpy: src/dst overlap by
+				 * one element when shifting within the array
+				 */
+				memmove(&candidate[idx + 1], &candidate[idx],
+					sizeof(removal_element_t) * len);
+			candidate[idx].RSSI = bss->RSSI;
+			candidate[idx].length = bss->length;
+			memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
+			return;
+		}
+	}
+}
+
+/*
+ * Free room in the escan result buffer for incoming entry @bi by deleting
+ * previously stored results that are weaker than it.  @candidate holds the
+ * eviction candidates collected by wl_cfg80211_find_removal_candidate().
+ * For each candidate weaker than @bi, the matching entry (same BSSID, RSSI
+ * and length) is located in @list by walking the variable-length records,
+ * removed with memmove, and the list's buflen/count adjusted.  Stops as
+ * soon as at least bi->length bytes have been reclaimed.
+ */
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+	wl_bss_info_t *bi)
+{
+	int idx1, idx2;
+	int total_delete_len = 0;
+	for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+		/* byte offset of the current record within the buffer */
+		int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+		wl_bss_info_t *bss = NULL;
+		/* only evict entries strictly weaker than the new one */
+		if (candidate[idx1].RSSI >= bi->RSSI)
+			continue;
+		for (idx2 = 0; idx2 < list->count; idx2++) {
+			/* records are variable length: advance by the
+			 * previous record's length field
+			 */
+			bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+				list->bss_info;
+			if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+				candidate[idx1].RSSI == bss->RSSI &&
+				candidate[idx1].length == dtoh32(bss->length)) {
+				u32 delete_len = dtoh32(bss->length);
+				WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+					MAC2STRDBG(bss->BSSID.octet)));
+				/* close the gap unless this is the last record */
+				if (idx2 < list->count -1) {
+					memmove((u8 *)bss, (u8 *)bss + delete_len,
+						list->buflen - cur_len - delete_len);
+				}
+				list->buflen -= delete_len;
+				list->count--;
+				total_delete_len += delete_len;
+				/* if delete_len is greater than or equal to result length */
+				if (total_delete_len >= bi->length) {
+					return;
+				}
+				break;
+			}
+			cur_len += dtoh32(bss->length);
+		}
+	}
+}
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
} else {
int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+ int remove_lower_rssi = FALSE;
+
+ bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
list = wl_escan_get_buf(cfg, FALSE);
if (scan_req_match(cfg)) {
#ifdef WL_HOST_BAND_MGMT
}
#endif /* WL_HOST_BAND_MGMT */
}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+ remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
WL_SCAN(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
for (i = 0; i < list->count; i++) {
bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
: list->bss_info;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ WL_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+ i, bss->RSSI, list->count));
+
+ if (remove_lower_rssi)
+ wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
cur_len += dtoh32(bss->length);
}
if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+ goto exit;
+ }
+#else
WL_ERR(("Buffer is too small: ignoring\n"));
goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
}
if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID
WL_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID));
}
else if (status == WLC_E_STATUS_SUCCESS) {
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#if defined(P2P_DISCOVERY_WAR)
+ if (cfg->p2p_net && cfg->scan_request &&
+ cfg->scan_request->dev == cfg->p2p_net &&
+ !cfg->p2p->vif_created) {
+ if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) {
+ WL_ERR(("mpc enabling back failed\n"));
+ }
+ }
+#endif /* defined(P2P_DISCOVERY_WAR) */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
escan_result->sync_id);
wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
}
else if (status == WLC_E_STATUS_ABORT) {
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#if defined(P2P_DISCOVERY_WAR)
+ if (cfg->p2p_net && cfg->scan_request &&
+ cfg->scan_request->dev == cfg->p2p_net &&
+ !cfg->p2p->vif_created) {
+ if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) {
+ WL_ERR(("mpc enabling back failed\n"));
+ }
+ }
+#endif /* defined(P2P_DISCOVERY_WAR) */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, escan_result->sync_id,
cfg->escan_info.cur_sync_id);
}
}
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+struct net_device *wl0dot1_dev;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \
+ defined(P2PONEINT)
static s32 wl_cfg80211_attach_p2p(void)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
return -ENODEV;
}
+#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
+ wl0dot1_dev = cfg->p2p_net;
+#endif /* CUSTOMER_HW20 && WLANAUDIO */
+
return 0;
}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || P2PONEINT */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
static s32 wl_cfg80211_detach_p2p(void)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
goto fail;
-#if defined(WL_ENABLE_P2P_IF)
+#ifdef P2PONEINT
+ if (!cfg->p2p_net) {
+ cfg->p2p_supported = true;
+
+ err = wl_cfg80211_attach_p2p();
+ if (err)
+ goto fail;
+
+ cfg->p2p_supported = true;
+ }
+#endif
+#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT)
if (cfg->p2p_net) {
/* Update MAC addr for p2p0 interface here. */
memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
return -ENODEV;
}
#endif /* WL_ENABLE_P2P_IF */
+#ifndef P2PONEINT
cfg->p2p_supported = true;
+#endif
} else if (ret == 0) {
if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
goto fail;
g_bcm_cfg = cfg;
#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+#ifndef P2PONEINT
err = wl_cfg80211_attach_p2p();
if (err)
goto cfg80211_attach_out;
+#endif
#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
return err;
}
}
-#if defined(WL_ENABLE_P2P_IF)
+#if defined(P2PONEINT)
static int wl_is_p2p_event(struct wl_event_q *e)
{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
switch (e->etype) {
- /* We have to seperate out the P2P events received
- * on primary interface so that it can be send up
- * via p2p0 interface.
- */
- case WLC_E_P2P_PROBREQ_MSG:
- case WLC_E_P2P_DISC_LISTEN_COMPLETE:
- case WLC_E_ACTION_FRAME_RX:
- case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
- case WLC_E_ACTION_FRAME_COMPLETE:
-
- if (e->emsg.ifidx != 0) {
- WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- /* We are only bothered about the P2P events received
- * on primary interface. For rest of them return false
- * so that it is sent over the interface corresponding
- * to the ifidx.
- */
- return FALSE;
- } else {
+ case WLC_E_IF:
WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
e->etype, e->emsg.ifidx));
- return TRUE;
- }
- break;
- default:
- WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return FALSE;
+ (void)schedule_timeout(20);
+
+ if (wl_get_p2p_status(cfg, IF_ADDING) ||
+ wl_get_p2p_status(cfg, IF_DELETING) ||
+ wl_get_p2p_status(cfg, IF_CHANGING) ||
+ wl_get_p2p_status(cfg, IF_CHANGED)) {
+ WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)."
+ " Sent it to p2p0 \n", e->emsg.ifidx));
+ return TRUE;
+ } else {
+ WL_TRACE(("Event is Not p2p event return False \n"));
+ return FALSE;
+ }
+
+ case WLC_E_P2P_PROBREQ_MSG:
+ case WLC_E_P2P_DISC_LISTEN_COMPLETE:
+ case WLC_E_ACTION_FRAME_RX:
+ case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
+ case WLC_E_ACTION_FRAME_COMPLETE:
+
+ if (e->emsg.ifidx != 0) {
+ WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n",
+ e->etype, e->emsg.ifidx));
+ return FALSE;
+ } else {
+ WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
+ e->etype, e->emsg.ifidx));
+ return TRUE;
+ }
+ break;
+
+ default:
+ WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n",
+ e->etype, e->emsg.ifidx));
+ return FALSE;
}
}
-#endif /* BCMDONGLEHOST && (WL_CFG80211_P2P_DEV_IF || WL_ENABLE_P2P_IF) */
+#endif
static s32 wl_event_handler(void *data)
{
* interface.
*/
#if defined(WL_CFG80211_P2P_DEV_IF)
- if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev)) {
+#ifdef P2PONEINT
+ if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_wdev))
+#else
+ if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev))
+#endif
+ {
cfgdev = bcmcfg_to_p2p_wdev(cfg);
} else {
struct net_device *ndev = NULL;
ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
if (ndev)
cfgdev = ndev_to_wdev(ndev);
+#ifdef P2PONEINT
+ else if (e->etype == WLC_E_IF) {
+ wl_put_event(e);
+ DHD_OS_WAKE_UNLOCK(cfg->pub);
+ continue;
+ }
+
+ if (cfgdev == NULL) {
+ if (e->etype == WLC_E_IF)
+ cfgdev = bcmcfg_to_prmry_wdev(cfg);
+ else {
+ cfgdev = ndev_to_wdev(wl_to_p2p_bss_ndev(cfg,
+ P2PAPI_BSSCFG_CONNECTION));
+ }
+ }
+#endif
}
#elif defined(WL_ENABLE_P2P_IF)
- // terence 20150116: fix for p2p connection in kernel 3.4
-// if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
- if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_net)) {
+ if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
cfgdev = cfg->p2p_net;
} else {
cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
}
+#ifdef P2PLISTEN_AP_SAMECHN
+/*
+ * Enable/disable firmware responses to P2P probe requests on the infra-AP
+ * channel via the "p2p_resp_ap_chn" iovar.  When enabling, power save is
+ * also forced off (WLC_SET_PM / PM_OFF) so the radio stays awake to answer
+ * on that channel.  Returns 0 on success or a negative wldev error code.
+ */
+s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
+{
+	s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable);
+
+	if ((ret == 0) && enable) {
+		/* disable PM for p2p responding on infra AP channel */
+		s32 pm = PM_OFF;
+
+		ret = wldev_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), true);
+	}
+
+	return ret;
+}
+#endif /* P2PLISTEN_AP_SAMECHN */
s32 wl_cfg80211_channel_to_freq(u32 channel)
{
info.rssi = dtoh16(bi->RSSI);
#if defined(RSSIOFFSET)
info.rssi = wl_update_rssi_offset(ndev, info.rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ info->rssi = MIN(info->rssi, RSSI_MAXVAL);
#endif
memcpy(info.bssid, &bi->BSSID, ETH_ALEN);
info.ie_len = buflen;
#ifdef PCIE_FULL_DONGLE
dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+ }
msg = " TDLS PEER CONNECTED ";
break;
case WLC_E_TDLS_PEER_DISCONNECTED :
#ifdef PCIE_FULL_DONGLE
dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_freq = 0;
+ }
msg = "TDLS PEER DISCONNECTED ";
break;
}
#endif /* WLTDLS */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+static s32
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len)
+#else
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
+ size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+{
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_wfd_ie_iovar_t info;
+ memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ cfg = g_bcm_cfg;
+
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+ /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10
+	 * and that causes a build error
+ */
+ BCM_REFERENCE(peer_capability);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+
+ switch (action_code) {
+ /* We need to set TDLS Wifi Display IE to firmware
+ * using tdls_wfd_ie iovar
+ */
+ case WLAN_TDLS_SET_PROBE_WFD_IE:
+ info.mode = TDLS_WFD_PROBE_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_SETUP_WFD_IE:
+ info.mode = TDLS_WFD_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length = len;
+ break;
+ default:
+ WL_ERR(("Unsupported action code : %d\n", action_code));
+ goto out;
+ }
+
+ ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret) {
+ WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ }
+out:
+#endif /* WLTDLS */
+ return ret;
+}
+
static s32
wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper)
ret = dhd_tdls_enable(dev, true, false, NULL);
if (ret < 0)
return ret;
- info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ /* If the discovery request is broadcast then we need to set
+ * info.mode to Tunneled Probe Request
+ */
+ if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+ info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+ }
+ else {
+ info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ }
break;
case NL80211_TDLS_SETUP:
/* auto mode on */
uint i, tokens, log_on = 0;
memset(tbuf, 0, sizeof(tbuf));
memset(sublog, 0, sizeof(sublog));
- if (copy_from_user(&tbuf, userbuf, min_t(size_t, sizeof(tbuf), count)))
+ if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count)))
return -EFAULT;
params = &tbuf[0];
if (!cfg || !cfg->wdev)
return -EINVAL;
+#if !defined(P2PONEINT)
if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
return -1;
+#endif /* BCMDONGLEHOST */
return 0;
}
WL_ERR(("Invalid band\n"));
return -EINVAL;
}
-
+
if ((ret = wldev_iovar_getint(ndev, "roam_off", &roam_off)) < 0) {
WL_ERR(("geting roam_off failed code=%d\n", ret));
return ret;
wl_event_msg_t e;
bzero(&e, sizeof(e));
- e.event_type = cpu_to_be32(WLC_E_ROAM);
+ e.event_type = cpu_to_be32(WLC_E_BSSID);
memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
/* trigger the roam event handler */
err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfg80211.h 491407 2014-07-16 09:23:04Z $
+ * $Id: wl_cfg80211.h 505096 2014-09-26 12:49:04Z $
*/
/**
#define CFG80211_ERROR_TEXT "CFG80211-ERROR) "
+#define MAX_WAIT_TIME 1500
+#define DNGL_FUNC(func, parameters) func parameters;
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
#if defined(DHD_DEBUG)
#define WL_ERR(args) \
do { \
struct net_device *ndev;
};
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#define BUF_OVERFLOW_MGMT_COUNT 3
+typedef struct {
+ int RSSI;
+ int length;
+ struct ether_addr BSSID;
+} removal_element_t;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
struct ap_info {
/* Structure to hold WPS, WPA IEs for a AP */
u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN];
#ifdef WLFBT
uint8 fbt_key[FBT_KEYLEN];
#endif
- bool roam_offload;
+ int roam_offload;
bool nan_running;
+#ifdef P2PLISTEN_AP_SAMECHN
+ bool p2p_resp_apchn_status;
+#endif /* P2PLISTEN_AP_SAMECHN */
+#ifdef WLTDLS
+ u8 *tdls_mgmt_frame;
+ u32 tdls_mgmt_frame_len;
+ s32 tdls_mgmt_freq;
+#endif /* WLTDLS */
};
extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
enum wl_management_type type);
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+#ifdef P2PLISTEN_AP_SAMECHN
+extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
+#endif /* P2PLISTEN_AP_SAMECHN */
/* btcoex functions */
void* wl_cfg80211_btcoex_init(struct net_device *ndev);
#endif /* WL_SUPPORT_ACS */
extern int wl_cfg80211_get_ioctl_version(void);
-extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable);
#ifdef WL_NAN
extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd,
int cmd_len);
#endif /* WL_NAN */
-#endif /* _wl_cfg80211_h_ */
+#ifdef WL_CFG80211_P2P_DEV_IF
+extern void wl_cfg80211_del_p2p_wdev(void);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#endif /* _wl_cfg80211_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfgp2p.c 490694 2014-07-11 14:37:00Z $
+ * $Id: wl_cfgp2p.c 504573 2014-09-24 15:21:25Z $
*
*/
#include <typedefs.h>
#include <wldev_common.h>
#include <wl_android.h>
+#if defined(P2PONEINT)
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
static s8 scanparambuf[WLC_IOCTL_SMLEN];
static s8 g_mgmt_ie_buf[2048];
static bool
static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
+#ifdef P2PONEINT
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+int wl_cfgp2p_if_open(struct net_device *net);
+int wl_cfgp2p_if_stop(struct net_device *net);
+#endif
+
#if defined(WL_ENABLE_P2P_IF)
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
.ndo_open = wl_cfgp2p_if_open,
.ndo_stop = wl_cfgp2p_if_stop,
.ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+#ifndef P2PONEINT
.ndo_start_xmit = wl_cfgp2p_start_xmit,
+#endif
};
#endif /* WL_ENABLE_P2P_IF */
return BCME_NOTFOUND;
}
+#ifdef P2PLISTEN_AP_SAMECHN
+ CFGP2P_DBG(("p2p0 listen channel %d AP connection chan %d \n",
+ channel, cfg->channel));
+ if ((mode == WL_P2P_DISC_ST_LISTEN) && (cfg->channel == channel)) {
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (cfg->p2p_resp_apchn_status) {
+ CFGP2P_DBG(("p2p_resp_apchn_status already ON \n"));
+ return BCME_OK;
+ }
+
+ if (wl_get_drv_status(cfg, CONNECTED, primary_ndev)) {
+ ret = wl_cfg80211_set_p2p_resp_ap_chn(primary_ndev, 1);
+ cfg->p2p_resp_apchn_status = true;
+ CFGP2P_DBG(("p2p_resp_apchn_status ON \n"));
+ return ret;
+ }
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#if defined(P2P_DISCOVERY_WAR)
+ if (mode == WL_P2P_DISC_ST_LISTEN || mode == WL_P2P_DISC_ST_SEARCH) {
+ if (!cfg->p2p->vif_created) {
+ if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 0) < 0) {
+ WL_ERR(("mpc disabling failed\n"));
+ }
+ }
+ }
+#endif /* defined(P2P_DISCOVERY_WAR) */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
discovery_mode.state = mode;
discovery_mode.chspec = wl_ch_host_to_driver(channel);
s32 ret = BCME_OK;
CFGP2P_DBG(("enter\n"));
- if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+ if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) <= 0) {
CFGP2P_ERR(("do nothing, not initialized\n"));
return -1;
}
CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
#ifdef DUAL_STA
- if ((cfg->p2p != NULL) && (bssidx != cfg->cfgdev_bssidx))
+ if ((cfg->p2p != NULL) && ((bssidx == 0) || (bssidx != cfg->cfgdev_bssidx)))
#else
if (cfg->p2p != NULL)
#endif
return BCME_ERROR;
}
} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ if (cfg->ap_info == NULL) {
+ CFGP2P_ERR(("hostapd ap_info null ptr refrence while setting IE\n"));
+ return BCME_ERROR;
+
+ }
switch (pktflag) {
case VNDR_IE_PRBRSP_FLAG :
mgmt_ie_buf = cfg->ap_info->probe_res_ie;
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+#if defined(P2P_DISCOVERY_WAR)
+ if (!cfg->p2p->vif_created) {
+ if (wldev_iovar_setint(ndev, "mpc", 1) < 0) {
+ WL_ERR(("mpc enabling back failed\n"));
+ }
+ }
+#endif /* defined(P2P_DISCOVERY_WAR) */
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
wl_set_p2p_status(cfg, LISTEN_EXPIRED);
if (timer_pending(&cfg->p2p->listen_timer)) {
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
if (ndev && (ndev->ieee80211_ptr != NULL)) {
#if defined(WL_CFG80211_P2P_DEV_IF)
- // terence 20141221: Fix p2p connection issue in both p2p device in Android 5.0
- // error log: CFG80211-ERROR) wl_cfg80211_send_action_frame : couldn't find peer's channel.
cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
#else
del_timer_sync(&cfg->p2p->listen_timer);
if (notify) {
#if defined(WL_CFG80211_P2P_DEV_IF)
+#ifdef P2PONEINT
+ if (wdev == NULL)
+ wdev = bcmcfg_to_p2p_wdev(cfg);
+#endif
if (wdev)
- // terence 20141221: Fix p2p connection issue in both p2p device in Android 5.0
- // error log: CFG80211-ERROR) wl_cfg80211_send_action_frame : couldn't find peer's channel.
cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
#else
* different from the P2P Device Address.
*/
memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+#ifndef P2PONEINT
out_int_addr->octet[4] ^= 0x80;
+#endif
}
s32 i = 0, index = -1;
#if defined(WL_CFG80211_P2P_DEV_IF)
- ndev = bcmcfg_to_prmry_ndev(cfg);
wdev = bcmcfg_to_p2p_wdev(cfg);
+#ifdef P2PONEINT
+ ndev = wdev_to_ndev(wdev);
+#else
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+#endif
#elif defined(WL_ENABLE_P2P_IF)
ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
wdev = ndev_to_wdev(ndev);
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \
+ defined(P2PONEINT)
+#ifdef P2PONEINT
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+
+ struct net_device *_ndev;
+ struct ether_addr primary_mac;
+ struct net_device *new_ndev;
+ chanspec_t chspec;
+ uint8 name[IFNAMSIZ];
+ s32 mode = 0;
+ s32 val = 0;
+
+
+ s32 wlif_type = -1;
+ s32 err, timeout = -1;
+
+ memset(name, 0, IFNAMSIZ);
+ strncpy(name, "p2p0", 4);
+ name[IFNAMSIZ - 1] = '\0';
+
+ if (cfg->p2p_net) {
+ CFGP2P_ERR(("p2p_net defined already.\n"));
+ return -EINVAL;
+ }
+
+ if (!cfg->p2p)
+ return -EINVAL;
+
+ if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ wl_cfgp2p_init_discovery(cfg);
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac,
+ &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ }
+
+ _ndev = bcmcfg_to_prmry_ndev(cfg);
+ memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
+ strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+
+ wl_cfg80211_scan_abort(cfg);
+
+
+ /* In concurrency case, STA may be already associated in a particular channel.
+ * so retrieve the current channel of primary interface and then start the virtual
+ * interface on that.
+ */
+ chspec = wl_cfg80211_get_shared_freq(cfg->wdev->wiphy);
+
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "cfg p2p_ifadd"
+ */
+ wl_set_p2p_status(cfg, IF_ADDING);
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ wlif_type = WL_P2P_IF_CLIENT;
+
+
+ err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+ if (unlikely(err)) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual iface add failed (%d) \n", err));
+ return -ENOMEM;
+ }
+
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_ADDING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+ struct wireless_dev *vwdev;
+ int pm_mode = PM_ENABLE;
+ wl_if_event_info *event = &cfg->if_event_info;
+
+		/* IF_ADD event has come back, we can proceed to register
+ * the new interface now, use the interface name provided by caller (thus
+ * ignore the one from wlc)
+ */
+ strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+ event->mac, event->bssidx);
+ if (new_ndev == NULL)
+ goto fail;
+
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
+
+ vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+ if (unlikely(!vwdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ goto fail;
+ }
+ vwdev->wiphy = cfg->wdev->wiphy;
+ WL_TRACE(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+ vwdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+ vwdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+ wl_set_drv_status(cfg, READY, new_ndev);
+ cfg->p2p->vif_created = true;
+ wl_set_mode_by_netdev(cfg, new_ndev, mode);
+
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ goto fail;
+ }
+
+ wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
+ val = 1;
+ /* Disable firmware roaming for P2P interface */
+ wldev_iovar_setint(new_ndev, "roam_off", val);
+
+ if (mode != WL_MODE_AP)
+ wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+ WL_ERR((" virtual interface(%s) is "
+ "created net attach done\n", cfg->p2p->vir_ifname));
+
+ /* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ INIT_COMPLETION(cfg->iface_disable);
+#else
+ init_completion(&cfg->iface_disable);
+#endif
+ cfg->p2p_net = new_ndev;
+ cfg->p2p_wdev = vwdev;
+
+ return 0;
+ } else {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+ memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ cfg->p2p->vif_created = false;
+ }
+
+
+fail:
+ if (wlif_type == WL_P2P_IF_GO)
+ wldev_iovar_setint(_ndev, "mpc", 1);
+ return -ENODEV;
+
+}
+#else
s32
wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
{
return ret;
}
+#endif /* P2PONEINT */
s32
wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
return 0;
}
+#ifndef P2PONEINT
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
return ret;
}
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#endif /* P2PONEINT */
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || defined(P2PONEINT) */
-#if defined(WL_ENABLE_P2P_IF)
+#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT)
+#ifdef P2PONEINT
+int wl_cfgp2p_if_open(struct net_device *net)
+#else
static int wl_cfgp2p_if_open(struct net_device *net)
+#endif
{
struct wireless_dev *wdev = net->ieee80211_ptr;
return 0;
}
+#ifdef P2PONEINT
+int wl_cfgp2p_if_stop(struct net_device *net)
+#else
static int wl_cfgp2p_if_stop(struct net_device *net)
+#endif
{
struct wireless_dev *wdev = net->ieee80211_ptr;
-
+#ifdef P2PONEINT
+ bcm_struct_cfgdev *cfgdev;
+#endif
if (!wdev)
return -EINVAL;
+#ifdef P2PONEINT
+ cfgdev = ndev_to_cfgdev(net);
+ wl_cfg80211_scan_stop(cfgdev);
+#else
wl_cfg80211_scan_stop(net);
+#endif
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
return 0;
}
+#endif /* defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT) */
+#if defined(WL_ENABLE_P2P_IF)
bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
{
return (if_ops == &wl_cfgp2p_if_ops);
struct wireless_dev *wdev = NULL;
struct ether_addr primary_mac;
- if (!cfg)
+ if (!cfg || !cfg->p2p_supported)
return ERR_PTR(-EINVAL);
WL_TRACE(("Enter\n"));
if (!wdev)
return -EINVAL;
+#ifdef P2PONEINT
+ return -EINVAL;
+#endif
+
WL_TRACE(("Enter\n"));
if (!rtnl_is_locked()) {
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfgp2p.h 472818 2014-04-25 08:07:56Z $
+ * $Id: wl_cfgp2p.h 497431 2014-08-19 11:03:27Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
};
struct p2p_bss {
- u32 bssidx;
+ s32 bssidx;
struct net_device *dev;
struct p2p_saved_ie saved_ie;
void *private_data;
* $Id: wl_cfgvendor.h 455257 2014-02-20 08:10:24Z $
*/
-/*
- * New vendor interface additon to nl80211/cfg80211 to allow vendors
- * to implement proprietary features over the cfg80211 stack.
- */
#ifndef _wl_cfgvendor_h_
#define _wl_cfgvendor_h_
ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
buflen));
+{
+ int channel;
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, merge broadcast SSID=\"%s\"\n",
+ __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, dtoh16(bi->RSSI), bi->SSID));
+}
+
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
IW_EV_QUAL_LEN >= end)
return -E2BIG;
+{
+ int channel;
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, merge broadcast SSID=\"%s\"\n",
+ __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, dtoh16(bi->RSSI), bi->SSID));
+}
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
cmd = SIOCGIWSCAN;
#endif
WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+ // terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
+ memset(&wrqu, 0, sizeof(wrqu));
if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
(g_iscan->iscan_state != ISCAN_STATE_IDLE))
up(&g_iscan->sysioc_sem);
{
iscan_info_t *iscan = NULL;
- WL_TRACE(("%s: iscan=%p\n", __FUNCTION__, iscan));
+ printk("%s: Enter\n", __FUNCTION__);
if (!dev)
return 0;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wldev_common.c 467328 2014-04-03 01:23:40Z $
+ * $Id: wldev_common.c 504503 2014-09-24 11:28:56Z $
*/
#include <osl.h>
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wldev_common.h 467328 2014-04-03 01:23:40Z $
+ * $Id: wldev_common.h 504503 2014-09-24 11:28:56Z $
*/
#ifndef __WLDEV_COMMON_H__
#define __WLDEV_COMMON_H__
int wldev_set_band(struct net_device *dev, uint band);
+#if defined(CUSTOM_PLATFORM_NV_TEGRA)
+int wldev_miracast_tuning(struct net_device *dev, char *command, int total_len);
+int wldev_get_assoc_resp_ie(struct net_device *dev, char *command, int total_len);
+int wldev_get_rx_rate_stats(struct net_device *dev, char *command, int total_len);
+#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+
#endif /* __WLDEV_COMMON_H__ */