iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
- hw_params(priv).scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- hw_params(priv).tfd_size = sizeof(struct iwl_tfd);
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
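Neither dropped value is lost: the scheduler byte-count table size is recomputed at its single point of use in the transport Tx allocation (see the '+' lines near the end of this patch), and the TFD size is taken directly as sizeof(struct iwl_tfd) where needed. A minimal sketch of the recomputation, mirroring those added lines (variable placement is illustrative only):

    /* sketch: same expression as the added lines in the Tx alloc hunk below */
    u16 scd_bc_tbls_size = priv->cfg->base_params->num_of_queues *
                           sizeof(struct iwlagn_scd_bc_tbl);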
static int iwl_set_hw_params(struct iwl_priv *priv)
{
- hw_params(priv).max_rxq_size = RX_QUEUE_SIZE;
- hw_params(priv).max_rxq_log = RX_QUEUE_SIZE_LOG;
if (iwlagn_mod_params.amsdu_size_8K)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_8K);
else
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_4K);
- hw_params(priv).max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
if (iwlagn_mod_params.disable_11n)
priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
* REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
} else {
beacon_int = iwl_adjust_beacon_interval(beacon_int,
- hw_params(priv).max_beacon_itrvl * TIME_UNIT);
+ IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}
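The constant substituted here carries the same value the removed field held: IWL_MAX_UCODE_BEACON_INTERVAL is 4 and TIME_UNIT is 1024 TU, so the cap handed to iwl_adjust_beacon_interval() remains 4096 TU. A rough, assumed sketch of the clamping that helper performs (it is not part of this patch; the factor-based scaling below is inferred from how the cap is used):

    /* assumed sketch: shrink an oversized beacon interval by the smallest
     * integer factor that brings it under max_beacon_val (4096 TU here) */
    u16 beacon_factor = (beacon_int + max_beacon_val) / max_beacon_val;
    u16 adjusted = beacon_int / beacon_factor;
    if (!adjusted)
            adjusted = max_beacon_val;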
/**
* struct iwl_hw_params
* @max_txq_num: Max # Tx queues supported
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
* @tx/rx_chains_num: Number of TX/RX chains
* @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
* @rx_page_order: Rx buffer page order
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
* @max_stations:
*/
struct iwl_hw_params {
u8 max_txq_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
u8 tx_chains_num;
u8 rx_chains_num;
u8 valid_tx_ant;
u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
u32 rx_page_order;
u8 max_stations;
u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
u32 max_inst_size;
u32 max_data_size;
u32 ct_kill_threshold; /* value in hw-dependent units */
struct iwl_tx_queue *txq, int slots_num,
u32 txq_id)
{
- size_t tfd_sz = hw_params(trans).tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd) {
- dma_free_coherent(dev, hw_params(trans).tfd_size *
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
}
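With tfd_size gone from hw_params, both Tx-queue paths size the TFD ring directly: tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX feeds the coherent DMA allocation, and the same expression sizes the dma_free_coherent() in the teardown path above. A minimal sketch of the allocation side, assuming the usual dma_alloc_coherent() pattern (the actual call sits outside this excerpt):

    /* sketch only: allocate the circular TFD buffer using the inline size */
    txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
    if (!txq->tfds)
            return -ENOMEM;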
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 scd_bc_tbls_size = priv->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
if (WARN_ON(priv->txq)) {
}
ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- hw_params(trans).scd_bc_tbls_size);
+ scd_bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;