/*
* Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**========================================================================
\file wma.c
\brief Implementation of WMA
========================================================================*/
/**=========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
$Header:$ $DateTime: $ $Author: $
when who what, where, why
-------- --- -----------------------------------------
12/03/2013 Ganesh Implementation of WMA APIs.
Kondabattini
27/03/2013 Ganesh Rx Management Support added
Babu
==========================================================================*/
/* ################ Header files ################ */
#include "ieee80211_common.h" /* ieee80211_frame */
#include "wma.h"
#include "wma_api.h"
#include "vos_api.h"
#include "wmi_unified_api.h"
#include "wlan_qct_sys.h"
#include "wniApi.h"
#include "aniGlobal.h"
#include "wmi_unified.h"
#include "wni_cfg.h"
#include "cfgApi.h"
#include "ol_txrx_ctrl_api.h"
#if defined(CONFIG_HL_SUPPORT)
#include "wlan_tgt_def_config_hl.h"
#else
#include "wlan_tgt_def_config.h"
#endif
#if defined(QCA_IBSS_SUPPORT)
#include "wlan_hdd_assoc.h"
#endif
#include "adf_nbuf.h"
#include "adf_os_types.h"
#include "ol_txrx_api.h"
#include "vos_memory.h"
#include "ol_txrx_types.h"
#include "ol_txrx_peer_find.h"
#include "wlan_qct_wda.h"
#include "wlan_qct_wda_msg.h"
#include "limApi.h"
#include "limSessionUtils.h"
#include "wdi_out.h"
#include "wdi_in.h"
#include "vos_cnss.h"
#include "vos_utils.h"
#include "tl_shim.h"
#if defined(QCA_WIFI_FTM)
#include "testmode.h"
#endif
#if !defined(REMOVE_PKT_LOG)
#include "pktlog_ac.h"
#endif
#include "dbglog_host.h"
/* FIXME: Inclusion of .c looks odd but this is how it is in internal codebase */
#include "wmi_version_whitelist.c"
#include "csrApi.h"
#include "ol_fw.h"
#include "dfs.h"
#include "radar_filters.h"
#include "regdomain_common.h"
#include "wma_ocb.h"
#include "wma_nan_datapath.h"
#include "adf_trace.h"
/* ################### defines ################### */
/*
 * TODO: Following constant should be shared by firmware in
* wmi_unified.h. This will be done once wmi_unified.h is updated.
*/
#define WMI_PEER_STATE_AUTHORIZED 0x2
#define WMA_2_4_GHZ_MAX_FREQ 3000
#define WOW_CSA_EVENT_OFFSET 12
/*
 * In the WMI_WOW_WAKEUP_HOST_EVENTID, the WMI NAN event follows the
 * fixed param at an offset of 12 bytes. This offset is used to extract
 * and decode the NAN WMI event.
*/
#define WOW_NAN_EVENT_OFFSET 12
#define WMA_DEFAULT_SCAN_REQUESTER_ID 1
#define WMI_SCAN_FINISH_EVENTS (WMI_SCAN_EVENT_START_FAILED |\
WMI_SCAN_EVENT_COMPLETED |\
WMI_SCAN_EVENT_DEQUEUED)
/* default value */
#define DEFAULT_INFRA_STA_KEEP_ALIVE_PERIOD 20
/* pdev, vdev and peer stats */
#define FW_PDEV_STATS_SET 0x1
#define FW_VDEV_STATS_SET 0x2
#define FW_PEER_STATS_SET 0x4
#define FW_RSSI_PER_CHAIN_STATS_SET 0x8
#define FW_STATS_SET 0xf
/* AR9888/AR6320 noise floor approximate value,
 * similar to the value mentioned in TLSHIM.
*/
#define WMA_TGT_NOISE_FLOOR_DBM (-96)
#define WMA_TGT_RSSI_INVALID 96
/*
 * Make sure that the link monitor and keep alive
 * default values are in sync with CFG.
*/
#define WMA_LINK_MONITOR_DEFAULT_TIME_SECS 10
#define WMA_KEEP_ALIVE_DEFAULT_TIME_SECS 5
#define AGC_DUMP 1
#define CHAN_DUMP 2
#define WD_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_DUMP 4
#endif
/* conformance test limits */
#define FCC 0x10
#define MKK 0x40
#define ETSI 0x30
/* Maximum Buffer length allowed for DFS phyerrors */
#define DFS_MAX_BUF_LENGHT 4096
#define WMI_DEFAULT_NOISE_FLOOR_DBM (-96)
/* Threshold to print tx time taken, in ms */
#define WDA_TX_TIME_THRESHOLD 1000
#define WMI_MCC_MIN_CHANNEL_QUOTA 20
#define WMI_MCC_MAX_CHANNEL_QUOTA 80
#define WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY 30
/* The maximum number of patterns that can be transmitted by the firmware
* and maximum patterns size.
*/
#define WMA_MAXNUM_PERIODIC_TX_PTRNS 6
#define WMI_MAX_HOST_CREDITS 2
#define WMI_WOW_REQUIRED_CREDITS 1
#define WMI_MAX_MHF_ENTRIES 32
#ifdef FEATURE_WLAN_D0WOW
#define DISABLE_PCIE_POWER_COLLAPSE 1
#define ENABLE_PCIE_POWER_COLLAPSE 0
#endif
#define MAX_HT_MCS_IDX 8
#define MAX_VHT_MCS_IDX 10
#define INVALID_MCS_IDX 255
#define LINK_STATUS_LEGACY 0
#define LINK_STATUS_VHT 0x1
#define LINK_STATUS_MIMO 0x2
#define LINK_SUPPORT_VHT 0x4
#define LINK_SUPPORT_MIMO 0x8
#define LINK_RATE_VHT 0x3
#define WMA_MCC_MIRACAST_REST_TIME 400
#define WMA_LOG_COMPLETION_TIMER 10000 /* 10 seconds */
#define WMA_FW_TIME_SYNC_TIMER 60000 /* 1 min */
#define WMA_FW_TIME_STAMP_LOW_MASK 0xffffffff
#define WMI_TLV_HEADROOM 128
#define WMA_SUSPEND_TIMEOUT_IN_SSR 1
#define WMA_DEL_BSS_TIMEOUT_IN_SSR 10
#ifdef FEATURE_WLAN_SCAN_PNO
static int wma_nlo_scan_cmp_evt_handler(void *handle, u_int8_t *event,
u_int32_t len);
#endif
static enum powersave_qpower_mode wma_get_qpower_config(tp_wma_handle wma);
#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * wma_wow_wakeup_stats_event() - send WOW wakeup stats
 * @wma: WMA handle holding the WOW wakeup packet counters
 *
 * This function sends the WOW wakeup stats diag event.
 *
 * Return: void
*/
static void wma_wow_wakeup_stats_event(tp_wma_handle wma)
{
WLAN_VOS_DIAG_EVENT_DEF(WowStats,
vos_event_wlan_powersave_wow_stats);
vos_mem_zero(&WowStats, sizeof(WowStats));
WowStats.wow_ucast_wake_up_count = wma->wow_ucast_wake_up_count;
WowStats.wow_bcast_wake_up_count = wma->wow_bcast_wake_up_count;
WowStats.wow_ipv4_mcast_wake_up_count =
wma->wow_ipv4_mcast_wake_up_count;
WowStats.wow_ipv6_mcast_wake_up_count =
wma->wow_ipv6_mcast_wake_up_count;
WowStats.wow_ipv6_mcast_ra_stats = wma->wow_ipv6_mcast_ra_stats;
WowStats.wow_ipv6_mcast_ns_stats = wma->wow_ipv6_mcast_ns_stats;
WowStats.wow_ipv6_mcast_na_stats = wma->wow_ipv6_mcast_na_stats;
WowStats.wow_pno_match_wake_up_count = wma->wow_pno_match_wake_up_count;
WowStats.wow_pno_complete_wake_up_count =
wma->wow_pno_complete_wake_up_count;
WowStats.wow_gscan_wake_up_count = wma->wow_gscan_wake_up_count;
WowStats.wow_low_rssi_wake_up_count = wma->wow_low_rssi_wake_up_count;
WowStats.wow_rssi_breach_wake_up_count =
wma->wow_rssi_breach_wake_up_count;
WowStats.wow_icmpv4_count = wma->wow_icmpv4_count;
WowStats.wow_icmpv6_count = wma->wow_icmpv6_count;
WowStats.wow_oem_response_wake_up_count =
wma->wow_oem_response_wake_up_count;
WLAN_VOS_DIAG_EVENT_REPORT(&WowStats, EVENT_WLAN_POWERSAVE_WOW_STATS);
}
#else
static void wma_wow_wakeup_stats_event(tp_wma_handle wma)
{
return;
}
#endif
#ifdef FEATURE_WLAN_EXTSCAN
/**
* enum extscan_report_events_type - extscan report events type
* @EXTSCAN_REPORT_EVENTS_BUFFER_FULL: report only when scan history is % full
* @EXTSCAN_REPORT_EVENTS_EACH_SCAN: report a scan completion event after scan
* @EXTSCAN_REPORT_EVENTS_FULL_RESULTS: forward scan results
* (beacons/probe responses + IEs)
* in real time to HAL, in addition to completion events.
* Note: To keep backward compatibility,
* fire completion events regardless of REPORT_EVENTS_EACH_SCAN.
* @EXTSCAN_REPORT_EVENTS_NO_BATCH: controls batching,
* 0 => batching, 1 => no batching
 * @EXTSCAN_REPORT_EVENTS_CONTEXT_HUB: report extscan events to the context hub
 */
enum extscan_report_events_type {
EXTSCAN_REPORT_EVENTS_BUFFER_FULL = 0x00,
EXTSCAN_REPORT_EVENTS_EACH_SCAN = 0x01,
EXTSCAN_REPORT_EVENTS_FULL_RESULTS = 0x02,
EXTSCAN_REPORT_EVENTS_NO_BATCH = 0x04,
EXTSCAN_REPORT_EVENTS_CONTEXT_HUB = 0x08,
};
#define WMA_EXTSCAN_CYCLE_WAKE_LOCK_DURATION (5 * 1000) /* in msec */
/*
 * Maximum number of entries that could be present in the
* WMI_EXTSCAN_HOTLIST_MATCH_EVENT buffer from the firmware
*/
#define WMA_EXTSCAN_MAX_HOTLIST_ENTRIES 10
#endif
/* Data rate 100KBPS based on IE Index */
struct index_data_rate_type
{
v_U8_t mcs_index;
v_U16_t ht20_rate[2];
v_U16_t ht40_rate[2];
};
#ifdef WLAN_FEATURE_11AC
struct index_vht_data_rate_type
{
v_U8_t mcs_index;
v_U16_t ht20_rate[2];
v_U16_t ht40_rate[2];
v_U16_t ht80_rate[2];
};
#endif
/* MCS Based rate table */
/* HT MCS parameters with Nss = 1 */
static struct index_data_rate_type mcs_nss1[] =
{
/* MCS L20 S20 L40 S40 */
{0, {65, 72}, {135, 150 }},
{1, {130, 144}, {270, 300 }},
{2, {195, 217}, {405, 450 }},
{3, {260, 289}, {540, 600 }},
{4, {390, 433}, {815, 900 }},
{5, {520, 578}, {1080, 1200}},
{6, {585, 650}, {1215, 1350}},
{7, {650, 722}, {1350, 1500}}
};
/* HT MCS parameters with Nss = 2 */
static struct index_data_rate_type mcs_nss2[] =
{
/* MCS L20 S20 L40 S40 */
{0, {130, 144}, {270, 300 }},
{1, {260, 289}, {540, 600 }},
{2, {390, 433}, {810, 900 }},
{3, {520, 578}, {1080, 1200}},
{4, {780, 867}, {1620, 1800}},
{5, {1040, 1156}, {2160, 2400}},
{6, {1170, 1300}, {2430, 2700}},
{7, {1300, 1440}, {2700, 3000}}
};
#ifdef WLAN_FEATURE_11AC
/* MCS Based VHT rate table */
/* MCS parameters with Nss = 1*/
static struct index_vht_data_rate_type vht_mcs_nss1[] =
{
/* MCS L20 S20 L40 S40 L80 S80 */
{0, {65, 72 }, {135, 150}, {293, 325} },
{1, {130, 144}, {270, 300}, {585, 650} },
{2, {195, 217}, {405, 450}, {878, 975} },
{3, {260, 289}, {540, 600}, {1170, 1300}},
{4, {390, 433}, {810, 900}, {1755, 1950}},
{5, {520, 578}, {1080, 1200}, {2340, 2600}},
{6, {585, 650}, {1215, 1350}, {2633, 2925}},
{7, {650, 722}, {1350, 1500}, {2925, 3250}},
{8, {780, 867}, {1620, 1800}, {3510, 3900}},
{9, {865, 960}, {1800, 2000}, {3900, 4333}}
};
/*MCS parameters with Nss = 2*/
static struct index_vht_data_rate_type vht_mcs_nss2[] =
{
/* MCS L20 S20 L40 S40 L80 S80 */
{0, {130, 144}, {270, 300}, { 585, 650}},
{1, {260, 289}, {540, 600}, {1170, 1300}},
{2, {390, 433}, {810, 900}, {1755, 1950}},
{3, {520, 578}, {1080, 1200}, {2340, 2600}},
{4, {780, 867}, {1620, 1800}, {3510, 3900}},
{5, {1040, 1156}, {2160, 2400}, {4680, 5200}},
{6, {1170, 1300}, {2430, 2700}, {5265, 5850}},
{7, {1300, 1444}, {2700, 3000}, {5850, 6500}},
{8, {1560, 1733}, {3240, 3600}, {7020, 7800}},
{9, {1730, 1920}, {3600, 4000}, {7800, 8667}}
};
#endif
void wma_send_msg(tp_wma_handle wma_handle, u_int16_t msg_type,
void *body_ptr, u_int32_t body_val);
#ifdef QCA_IBSS_SUPPORT
static void wma_data_tx_ack_comp_hdlr(void *wma_context,
adf_nbuf_t netbuf,
int32_t status);
#endif
static VOS_STATUS wma_vdev_detach(tp_wma_handle wma_handle,
tpDelStaSelfParams pdel_sta_self_req_param,
u_int8_t generateRsp);
static int32_t wmi_unified_vdev_stop_send(wmi_unified_t wmi, u_int8_t vdev_id);
static tANI_U32 gFwWlanFeatCaps;
static eHalStatus wma_set_ppsconfig(tANI_U8 vdev_id, tANI_U16 pps_param,
int value);
static eHalStatus wma_set_mimops(tp_wma_handle wma_handle,
tANI_U8 vdev_id, int value);
#ifdef FEATURE_WLAN_TDLS
static int wma_update_fw_tdls_state(WMA_HANDLE handle, void *pwmaTdlsparams);
static int wma_update_tdls_peer_state(WMA_HANDLE handle,
tTdlsPeerStateParams *peerStateParams);
static int wma_set_tdls_offchan_mode(WMA_HANDLE wma_handle,
tTdlsChanSwitchParams *pChanSwitchParams);
#endif
static eHalStatus wma_set_smps_params(tp_wma_handle wma_handle,
tANI_U8 vdev_id, int value);
#if defined(QCA_WIFI_FTM)
void wma_utf_attach(tp_wma_handle wma_handle);
void wma_utf_detach(tp_wma_handle wma_handle);
static VOS_STATUS
wma_process_ftm_command(tp_wma_handle wma_handle,
struct ar6k_testmode_cmd_data *msg_buffer);
#endif
VOS_STATUS wma_create_peer(tp_wma_handle wma, ol_txrx_pdev_handle pdev,
ol_txrx_vdev_handle vdev, u8 peer_addr[6],
u_int32_t peer_type, u_int8_t vdev_id,
v_BOOL_t roam_synch_in_progress);
static ol_txrx_vdev_handle wma_vdev_attach(tp_wma_handle wma_handle,
tpAddStaSelfParams self_sta_req,
u_int8_t generateRsp);
static void wma_set_bsskey(tp_wma_handle wma_handle, tpSetBssKeyParams key_info);
/*DFS Attach*/
struct ieee80211com* wma_dfs_attach(struct ieee80211com *ic);
static void wma_dfs_detach(struct ieee80211com *ic);
void wma_set_bss_rate_flags(struct wma_txrx_node *iface,
tpAddBssParams add_bss);
/*Configure DFS with radar tables and regulatory domain*/
void wma_dfs_configure(struct ieee80211com *ic);
/*Configure the current channel with the DFS*/
struct ieee80211_channel *
wma_dfs_configure_channel(struct ieee80211com *dfs_ic,
wmi_channel *chan,
WLAN_PHY_MODE chanmode,
struct wma_vdev_start_req *req);
/* VDEV UP */
static int
wmi_unified_vdev_up_send(wmi_unified_t wmi,
u_int8_t vdev_id, u_int16_t aid,
u_int8_t bssid[IEEE80211_ADDR_LEN]);
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
void wma_process_roam_synch_complete(WMA_HANDLE handle,
tSirSmeRoamOffloadSynchCnf *synchcnf);
void wma_process_roam_synch_fail(WMA_HANDLE handle,
tSirRoamOffloadSynchFail *synchfail);
#endif
static VOS_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
t_thermal_cmd_params thermal_info);
#ifdef FEATURE_WLAN_CH_AVOID
VOS_STATUS wma_process_ch_avoid_update_req(tp_wma_handle wma_handle,
tSirChAvoidUpdateReq *ch_avoid_update_req);
#endif /* FEATURE_WLAN_CH_AVOID */
static void wma_set_stakey(tp_wma_handle wma_handle, tpSetStaKeyParams key_info);
static void wma_beacon_miss_handler(tp_wma_handle wma, u_int32_t vdev_id,
uint32_t rssi);
static void wma_set_suspend_dtim(tp_wma_handle wma);
static void wma_set_resume_dtim(tp_wma_handle wma);
static int wma_roam_event_callback(WMA_HANDLE handle, u_int8_t *event_buf,
u_int32_t len);
static VOS_STATUS wma_stop_scan(tp_wma_handle wma_handle,
tAbortScanParams *abort_scan_req);
static void wma_set_sap_keepalive(tp_wma_handle wma, u_int8_t vdev_id);
static void wma_set_vdev_mgmt_rate(tp_wma_handle wma, u_int8_t vdev_id);
static int wma_smps_force_mode_callback(WMA_HANDLE handle, uint8_t *event_buf,
uint32_t len);
static void wma_send_time_stamp_sync_cmd(void *data);
tANI_U8 wma_getCenterChannel(tANI_U8 chan, tANI_U8 chan_offset);
/*
* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
* 0 for no restriction
* 1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec
* 2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec
* 3 for 1 us
* 4 for 2 us
* 5 for 4 us
* 6 for 8 us
* 7 for 16 us
*/
static const u_int8_t wma_mpdu_spacing[] = {0, 1, 1, 1, 2, 4, 8, 16};
static inline uint8_t wma_parse_mpdudensity(u_int8_t mpdudensity)
{
if (mpdudensity < sizeof(wma_mpdu_spacing))
return wma_mpdu_spacing[mpdudensity];
else
return 0;
}
/* Function : wma_get_vdev_count
 * Description : Returns the number of active vdevs.
 * Args : @wma - wma handle
 * Returns : Active vdev count.
 */
static inline u_int8_t wma_get_vdev_count(tp_wma_handle wma)
{
u_int8_t vdev_count = 0, i;
for (i = 0; i < wma->max_bssid; i++) {
if (wma->interfaces[i].handle)
vdev_count++;
}
return vdev_count;
}
/**
* wma_did_ssr_happen() - Check if SSR happened by comparing current
* wma handle and new wma handle
* @wma: Pointer to wma handle
*
* This API will compare saved wma handle and new wma handle using global
* vos context. If both doesn't match implies that WMA handle got changed
* while waiting for command which will happen in SSR.
*
* Return: True if SSR happened else false
*/
static bool wma_did_ssr_happen(tp_wma_handle wma)
{
return vos_get_context(VOS_MODULE_ID_WDA,
vos_get_global_context(VOS_MODULE_ID_VOSS, NULL)) != wma;
}
/* Function : wma_is_vdev_in_ap_mode
* Description : Helper function to know whether given vdev id
* is in AP mode or not.
 * Args : @wma - wma handle, @vdev_id - vdev ID.
* Returns : True - if given vdev id is in AP mode.
* False - if given vdev id is not in AP mode.
*/
static bool wma_is_vdev_in_ap_mode(tp_wma_handle wma, u_int8_t vdev_id)
{
struct wma_txrx_node *intf = wma->interfaces;
if (vdev_id >= wma->max_bssid) {
WMA_LOGP("%s: Invalid vdev_id %hu", __func__, vdev_id);
VOS_ASSERT(0);
return false;
}
if ((intf[vdev_id].type == WMI_VDEV_TYPE_AP) &&
((intf[vdev_id].sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO) ||
(intf[vdev_id].sub_type == 0)))
return true;
return false;
}
#ifdef QCA_IBSS_SUPPORT
/* Function : wma_is_vdev_in_ibss_mode
 * Description : Helper function to know whether given vdev id
 * is in IBSS mode or not.
 * Args : @wma - wma handle, @vdev_id - vdev ID.
 * Returns : True - if given vdev id is in IBSS mode.
 * False - if given vdev id is not in IBSS mode.
 */
static bool wma_is_vdev_in_ibss_mode(tp_wma_handle wma, u_int8_t vdev_id)
{
struct wma_txrx_node *intf = wma->interfaces;
if (vdev_id >= wma->max_bssid) {
WMA_LOGP("%s: Invalid vdev_id %hu", __func__, vdev_id);
VOS_ASSERT(0);
return false;
}
if (intf[vdev_id].type == WMI_VDEV_TYPE_IBSS)
return true;
return false;
}
#endif
/*
* Function : wma_find_bssid_by_vdev_id
* Description : Get the BSS ID corresponding to the vdev ID
* Args : @wma - wma handle, @vdev_id - vdev ID
* Returns : Returns pointer to bssid on success,
* otherwise returns NULL.
*/
static inline u_int8_t *wma_find_bssid_by_vdev_id(tp_wma_handle wma,
u_int8_t vdev_id)
{
if (vdev_id >= wma->max_bssid)
return NULL;
return wma->interfaces[vdev_id].bssid;
}
/*
 * Function : wma_find_vdev_by_bssid
 * Description : Get the vdev handle and vdev ID corresponding to a BSS ID
 * Args : @wma - wma handle, @bssid - BSS ID to match,
 * @vdev_id - out param filled with the matching vdev ID
 * Returns : Returns pointer to the vdev handle on success,
 * otherwise returns NULL.
 */
static void *wma_find_vdev_by_bssid(tp_wma_handle wma, u_int8_t *bssid,
u_int8_t *vdev_id)
{
int i;
for (i = 0; i < wma->max_bssid; i++) {
if (vos_is_macaddr_equal(
(v_MACADDR_t *)wma->interfaces[i].bssid,
(v_MACADDR_t *)bssid) == VOS_TRUE) {
*vdev_id = i;
return wma->interfaces[i].handle;
}
}
return NULL;
}
#ifdef BIG_ENDIAN_HOST
/* ############# function definitions ############ */
/* function : wma_swap_bytes
 * Description : Converts each 32-bit word of the buffer to little-endian
 * byte order (a byte swap on big-endian hosts).
 * Args : @pv - pointer to the buffer, @n - buffer length in bytes
 * Returns : None
 */
v_VOID_t wma_swap_bytes(v_VOID_t *pv, v_SIZE_t n)
{
v_SINT_t no_words;
v_SINT_t i;
v_U32_t *word_ptr;
no_words = n/sizeof(v_U32_t);
word_ptr = (v_U32_t *)pv;
for (i=0; i<no_words; i++) {
*(word_ptr + i) = __cpu_to_le32(*(word_ptr + i));
}
}
#define SWAPME(x, len) wma_swap_bytes(&x, len);
#endif
/**
 * mcs_rate_match() - find the matching MCS rate
 * @match_rate: the rate to match against the rate tables
 * @is_sgi: set to true if the matched rate is an SGI rate
* @nss: the nss in use
* @nss1_rate: the nss1 rate
* @nss1_srate: the nss1 SGI rate
* @nss2_rate: the nss2 rate
* @nss2_srate: the nss2 SGI rate
*
* This is a helper function to find the match of the tx_rate
* in terms of the nss1/nss2 rate with non-SGI/SGI.
*
* Return: the found rate or 0 otherwise
*/
static inline uint16_t mcs_rate_match(uint16_t match_rate, bool *is_sgi,
uint8_t nss, uint16_t nss1_rate, uint16_t nss1_srate,
uint16_t nss2_rate, uint16_t nss2_srate)
{
if (match_rate == nss1_rate)
return nss1_rate;
else if (match_rate == nss1_srate) {
*is_sgi = true;
return nss1_srate;
} else if (nss == 2 && match_rate == nss2_rate)
return nss2_rate;
else if (nss == 2 && match_rate == nss2_srate) {
*is_sgi = true;
return nss2_srate;
} else
return 0;
}
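/**
 * wma_get_mcs_idx() - map a peer max rate to its MCS index
 * @maxRate: maximum rate (in 100 kbps units) to look up
 * @rate_flags: HT/VHT bandwidth and SGI rate flags
 * @nss: number of spatial streams in use
 * @mcsRateFlag: out param, updated with the flags of the matched rate
 *
 * Walks the VHT and HT rate tables above looking for a rate that
 * matches @maxRate for the given nss and bandwidth.
 *
 * Return: the matched MCS index, or INVALID_MCS_IDX if no rate matches
 */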
static tANI_U8 wma_get_mcs_idx(tANI_U16 maxRate, tANI_U8 rate_flags,
tANI_U8 nss,
tANI_U8 *mcsRateFlag)
{
tANI_U8 curIdx = 0;
tANI_U16 cur_rate = 0;
bool is_sgi = false;
WMA_LOGD("%s rate:%d rate_flgs: 0x%x, nss: %d",
__func__, maxRate,rate_flags, nss);
*mcsRateFlag = rate_flags;
*mcsRateFlag &= ~eHAL_TX_RATE_SGI;
#ifdef WLAN_FEATURE_11AC
for (curIdx = 0; curIdx < MAX_VHT_MCS_IDX; curIdx++) {
if (rate_flags & eHAL_TX_RATE_VHT80) {
/* check for vht80 nss1/2 rate set */
cur_rate = mcs_rate_match(maxRate, &is_sgi, nss,
vht_mcs_nss1[curIdx].ht80_rate[0],
vht_mcs_nss1[curIdx].ht80_rate[1],
vht_mcs_nss2[curIdx].ht80_rate[0],
vht_mcs_nss2[curIdx].ht80_rate[1]);
if (cur_rate)
goto rate_found;
}
if ((rate_flags & eHAL_TX_RATE_VHT40) |
(rate_flags & eHAL_TX_RATE_VHT80)) {
/* check for vht40 nss1/2 rate set */
cur_rate = mcs_rate_match(maxRate, &is_sgi, nss,
vht_mcs_nss1[curIdx].ht40_rate[0],
vht_mcs_nss1[curIdx].ht40_rate[1],
vht_mcs_nss2[curIdx].ht40_rate[0],
vht_mcs_nss2[curIdx].ht40_rate[1]);
if (cur_rate) {
*mcsRateFlag &= ~eHAL_TX_RATE_VHT80;
goto rate_found;
}
}
if ((rate_flags & eHAL_TX_RATE_VHT20) |
(rate_flags & eHAL_TX_RATE_VHT40) |
(rate_flags & eHAL_TX_RATE_VHT80)) {
/* check for vht20 nss1/2 rate set */
cur_rate = mcs_rate_match(maxRate, &is_sgi, nss,
vht_mcs_nss1[curIdx].ht20_rate[0],
vht_mcs_nss1[curIdx].ht20_rate[1],
vht_mcs_nss2[curIdx].ht20_rate[0],
vht_mcs_nss2[curIdx].ht20_rate[1]);
if (cur_rate) {
*mcsRateFlag &= ~(eHAL_TX_RATE_VHT80 |
eHAL_TX_RATE_VHT40);
goto rate_found;
}
}
}
#endif
for (curIdx = 0; curIdx < MAX_HT_MCS_IDX; curIdx++) {
if (rate_flags & eHAL_TX_RATE_HT40) {
/* check for ht40 nss1/2 rate set */
cur_rate = mcs_rate_match(maxRate, &is_sgi, nss,
mcs_nss1[curIdx].ht40_rate[0],
mcs_nss1[curIdx].ht40_rate[1],
mcs_nss2[curIdx].ht40_rate[0],
mcs_nss2[curIdx].ht40_rate[1]);
if (cur_rate) {
*mcsRateFlag = eHAL_TX_RATE_HT40;
goto rate_found;
}
}
if ((rate_flags & eHAL_TX_RATE_HT20) ||
(rate_flags & eHAL_TX_RATE_HT40)) {
/* check for ht20 nss1/2 rate set */
cur_rate = mcs_rate_match(maxRate, &is_sgi, nss,
mcs_nss1[curIdx].ht20_rate[0],
mcs_nss1[curIdx].ht20_rate[1],
mcs_nss2[curIdx].ht20_rate[0],
mcs_nss2[curIdx].ht20_rate[1]);
if (cur_rate) {
*mcsRateFlag = eHAL_TX_RATE_HT20;
goto rate_found;
}
}
}
rate_found:
/* set SGI flag only if this is SGI rate */
if (cur_rate && is_sgi == true)
*mcsRateFlag |= eHAL_TX_RATE_SGI;
WMA_LOGD("%s - cur_rate: %d index: %d rate_flag: 0x%x is_sgi: %d",
__func__, cur_rate, curIdx, *mcsRateFlag, is_sgi);
return (cur_rate ? curIdx : INVALID_MCS_IDX);
}
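/**
 * wma_find_vdev_req() - find and dequeue a pending vdev target request
 * @wma: WMA handle
 * @vdev_id: vdev ID
 * @type: request message type
 *
 * Removes the matching entry from the vdev response queue; see
 * wma_peek_vdev_req() below for a non-destructive lookup.
 *
 * Return: the request message on success, NULL if no match is found
 */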
static struct wma_target_req *wma_find_vdev_req(tp_wma_handle wma,
u_int8_t vdev_id,
u_int8_t type)
{
struct wma_target_req *req_msg = NULL, *tmp;
bool found = false;
adf_os_spin_lock_bh(&wma->vdev_respq_lock);
list_for_each_entry_safe(req_msg, tmp,
&wma->vdev_resp_queue, node) {
if (req_msg->vdev_id != vdev_id)
continue;
if (req_msg->type != type)
continue;
found = true;
list_del(&req_msg->node);
break;
}
adf_os_spin_unlock_bh(&wma->vdev_respq_lock);
if (!found) {
WMA_LOGP("%s: target request not found for vdev_id %d type %d",
__func__, vdev_id, type);
return NULL;
}
WMA_LOGD("%s: target request found for vdev id: %d type %d msg %d",
__func__, vdev_id, type, req_msg->msg_type);
return req_msg;
}
/**
 * wma_peek_vdev_req() - peek at the request message queued for a response.
 * Unlike wma_find_vdev_req(), this function does not remove the node once found.
* @wma: WMA handle
* @vdev_id: vdev ID
* @type: request message type
*
* Return: the request message found
*/
static struct wma_target_req *wma_peek_vdev_req(tp_wma_handle wma,
uint8_t vdev_id,
uint8_t type)
{
struct wma_target_req *req_msg = NULL, *tmp;
bool found = false;
adf_os_spin_lock_bh(&wma->vdev_respq_lock);
list_for_each_entry_safe(req_msg, tmp, &wma->vdev_resp_queue, node) {
if (req_msg->vdev_id != vdev_id)
continue;
if (req_msg->type != type)
continue;
found = true;
break;
}
adf_os_spin_unlock_bh(&wma->vdev_respq_lock);
if (!found) {
WMA_LOGP("%s: target request not found for vdev_id %d type %d",
__func__, vdev_id, type);
return NULL;
}
WMA_LOGD("%s: target request found for vdev id: %d type %d msg %d",
__func__, vdev_id, type, req_msg->msg_type);
return req_msg;
}
/**
* wma_get_bpf_caps_event_handler() - Event handler for get bpf capability
* @handle: WMA global handle
* @cmd_param_info: command event data
* @len: Length of @cmd_param_info
*
* Return: 0 on Success or Errno on failure
*/
static int wma_get_bpf_caps_event_handler(void *handle,
u_int8_t *cmd_param_info,
u_int32_t len)
{
tp_wma_handle wma = (tp_wma_handle)handle;
WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *param_buf;
wmi_bpf_capability_info_evt_fixed_param *event;
struct sir_bpf_get_offload *bpf_get_offload;
tpAniSirGlobal pmac = (tpAniSirGlobal)vos_get_context(
VOS_MODULE_ID_PE, wma->vos_context);
if (!pmac) {
WMA_LOGE("%s: Invalid pmac", __func__);
return -EINVAL;
}
if (!pmac->sme.pbpf_get_offload_cb) {
WMA_LOGE("%s: Callback not registered", __func__);
return -EINVAL;
}
param_buf = (WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *)cmd_param_info;
event = param_buf->fixed_param;
bpf_get_offload = vos_mem_malloc(sizeof(*bpf_get_offload));
if (!bpf_get_offload) {
WMA_LOGP("%s: Memory allocation failed.", __func__);
return -ENOMEM;
}
bpf_get_offload->bpf_version = event->bpf_version;
bpf_get_offload->max_bpf_filters = event->max_bpf_filters;
bpf_get_offload->max_bytes_for_bpf_inst =
event->max_bytes_for_bpf_inst;
WMA_LOGD("%s: BPF capabilities version: %d max bpf filter size: %d",
__func__, bpf_get_offload->bpf_version,
bpf_get_offload->max_bytes_for_bpf_inst);
WMA_LOGD("%s: sending bpf capabilities event to hdd", __func__);
pmac->sme.pbpf_get_offload_cb(pmac->hHdd, bpf_get_offload);
vos_mem_free(bpf_get_offload);
return 0;
}
/**
 * wma_lost_link_info_handler() - collect lost link information and inform SME
 * on disconnection in STA mode.
* @wma: WMA handle
* @vdev_id: vdev ID
* @rssi: rssi at disconnection time
*
* Return: none
*/
static void wma_lost_link_info_handler(tp_wma_handle wma, uint32_t vdev_id,
int8_t rssi)
{
struct sir_lost_link_info *lost_link_info;
VOS_STATUS vos_status;
vos_msg_t sme_msg = {0};
/* report lost link information only for STA mode */
if (wma->interfaces[vdev_id].vdev_up &&
(WMI_VDEV_TYPE_STA == wma->interfaces[vdev_id].type) &&
(0 == wma->interfaces[vdev_id].sub_type)) {
lost_link_info = vos_mem_malloc(sizeof(*lost_link_info));
if (NULL == lost_link_info) {
WMA_LOGE("%s: failed to allocate memory", __func__);
return;
}
lost_link_info->vdev_id = vdev_id;
lost_link_info->rssi = rssi;
sme_msg.type = eWNI_SME_LOST_LINK_INFO_IND;
sme_msg.bodyptr = lost_link_info;
sme_msg.bodyval = 0;
WMA_LOGI("%s: post msg to SME, bss_idx %d, rssi %d",
__func__,
lost_link_info->vdev_id,
lost_link_info->rssi);
vos_status = vos_mq_post_message(VOS_MODULE_ID_SME, &sme_msg);
if (!VOS_IS_STATUS_SUCCESS(vos_status)) {
WMA_LOGE("%s: fail to post msg to SME",
__func__);
vos_mem_free(lost_link_info);
}
}
}
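/**
 * host_map_smps_mode() - map a firmware forced SMPS mode to the host enum
 * @fw_smps_mode: SMPS mode reported by firmware
 *
 * Return: the corresponding tSmpsModeValue, SMPS_MODE_DISABLED by default
 */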
tSmpsModeValue host_map_smps_mode (A_UINT32 fw_smps_mode)
{
tSmpsModeValue smps_mode = SMPS_MODE_DISABLED;
switch (fw_smps_mode) {
case WMI_SMPS_FORCED_MODE_STATIC:
smps_mode = STATIC_SMPS_MODE;
break;
case WMI_SMPS_FORCED_MODE_DYNAMIC:
smps_mode = DYNAMIC_SMPS_MODE;
break;
default:
smps_mode = SMPS_MODE_DISABLED;
}
return smps_mode;
}
/**
* wma_smps_mode_to_force_mode_param() - Map smps mode to force
 * mode command param
* @smps_mode: SMPS mode according to the protocol
*
* Return: int > 0 for success else failure
*/
static int wma_smps_mode_to_force_mode_param(uint8_t smps_mode)
{
int param = -EINVAL;
switch (smps_mode) {
case STATIC_SMPS_MODE:
param = WMI_SMPS_FORCED_MODE_STATIC;
break;
case DYNAMIC_SMPS_MODE:
param = WMI_SMPS_FORCED_MODE_DYNAMIC;
break;
case SMPS_MODE_DISABLED:
param = WMI_SMPS_FORCED_MODE_DISABLED;
break;
default:
WMA_LOGE(FL("smps mode cannot be mapped :%d "),
smps_mode);
}
return param;
}
#ifdef FEATURE_WLAN_AUTO_SHUTDOWN
/* function : wma_post_auto_shutdown_msg
* Description : function to post auto shutdown event to sme
*/
static int wma_post_auto_shutdown_msg(void)
{
tSirAutoShutdownEvtParams *auto_sh_evt;
VOS_STATUS vos_status;
vos_msg_t sme_msg = {0} ;
auto_sh_evt = (tSirAutoShutdownEvtParams *)
vos_mem_malloc(sizeof(tSirAutoShutdownEvtParams));
if (!auto_sh_evt) {
WMA_LOGE("%s: No Mem", __func__);
return -ENOMEM;
}
auto_sh_evt->shutdown_reason =
WMI_HOST_AUTO_SHUTDOWN_REASON_TIMER_EXPIRY;
sme_msg.type = eWNI_SME_AUTO_SHUTDOWN_IND;
sme_msg.bodyptr = auto_sh_evt;
sme_msg.bodyval = 0;
vos_status = vos_mq_post_message(VOS_MODULE_ID_SME, &sme_msg);
if ( !VOS_IS_STATUS_SUCCESS(vos_status) ) {
WMA_LOGE("Fail to post eWNI_SME_AUTO_SHUTDOWN_IND msg to SME");
vos_mem_free(auto_sh_evt);
return -EINVAL;
}
return 0;
}
/* function : wma_auto_shutdown_event_handler
* Description : function to process auto shutdown timer trigger
*/
static int wma_auto_shutdown_event_handler(void *handle, u_int8_t *event,
u_int32_t len)
{
wmi_host_auto_shutdown_event_fixed_param *wmi_auto_sh_evt;
WMI_HOST_AUTO_SHUTDOWN_EVENTID_param_tlvs *param_buf =
(WMI_HOST_AUTO_SHUTDOWN_EVENTID_param_tlvs *)
event;
if (!param_buf || !param_buf->fixed_param) {
WMA_LOGE("%s:%d: Invalid Auto shutdown timer evt", __func__,
__LINE__);
return -EINVAL;
}
wmi_auto_sh_evt = param_buf->fixed_param;
if (wmi_auto_sh_evt->shutdown_reason
!= WMI_HOST_AUTO_SHUTDOWN_REASON_TIMER_EXPIRY) {
WMA_LOGE("%s:%d: Invalid Auto shutdown timer evt", __func__,
__LINE__);
return -EINVAL;
}
WMA_LOGD("%s:%d: Auto Shutdown Evt: %d", __func__, __LINE__,
wmi_auto_sh_evt->shutdown_reason);
return(wma_post_auto_shutdown_msg());
}
/* function : wma_set_auto_shutdown_timer_req
* Description : function sets auto shutdown timer in firmware
* Args : wma handle, auto shutdown timer value
* Returns : status of wmi cmd
*/
static VOS_STATUS wma_set_auto_shutdown_timer_req(tp_wma_handle wma_handle,
tSirAutoShutdownCmdParams *auto_sh_cmd)
{
int status = 0;
wmi_buf_t buf = NULL;
u_int8_t *buf_ptr;
wmi_host_auto_shutdown_cfg_cmd_fixed_param *wmi_auto_sh_cmd;
int len = sizeof(wmi_host_auto_shutdown_cfg_cmd_fixed_param);
if (auto_sh_cmd == NULL) {
WMA_LOGE("%s : Invalid Autoshutdown cfg cmd", __func__);
return VOS_STATUS_E_FAILURE;
}
WMA_LOGD("%s: Set WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID:TIMER_VAL=%d",
__func__, auto_sh_cmd->timer_val);
buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
if (!buf) {
WMA_LOGE("%s : wmi_buf_alloc failed", __func__);
return VOS_STATUS_E_NOMEM;
}
buf_ptr = (u_int8_t *) wmi_buf_data(buf);
wmi_auto_sh_cmd = (wmi_host_auto_shutdown_cfg_cmd_fixed_param *)buf_ptr;
wmi_auto_sh_cmd->timer_value = auto_sh_cmd->timer_val;
WMITLV_SET_HDR(&wmi_auto_sh_cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_host_auto_shutdown_cfg_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(wmi_host_auto_shutdown_cfg_cmd_fixed_param));
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf,
len, WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID);
if (status != EOK) {
WMA_LOGE("%s: WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID Err %d",
__func__, status);
wmi_buf_free(buf);
return VOS_STATUS_E_FAILURE;
}
return VOS_STATUS_SUCCESS;
}
#endif
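/**
 * wma_vdev_start_rsp() - send the add BSS response for a vdev start
 * @wma: WMA handle
 * @add_bss: the original add BSS request parameters
 * @resp_event: vdev start response event from firmware
 *
 * For AP/IBSS mode this also allocates the beacon buffer and marks the
 * BSS as started before WDA_ADD_BSS_RSP is posted to umac.
 */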
static void wma_vdev_start_rsp(tp_wma_handle wma,
tpAddBssParams add_bss,
wmi_vdev_start_response_event_fixed_param *resp_event)
{
struct beacon_info *bcn;
#ifdef QCA_IBSS_SUPPORT
WMA_LOGD("%s: vdev start response received for %s mode", __func__,
add_bss->operMode == BSS_OPERATIONAL_MODE_IBSS ? "IBSS" : "non-IBSS");
#endif
if (resp_event->status) {
add_bss->status = VOS_STATUS_E_FAILURE;
goto send_fail_resp;
}
if ((add_bss->operMode == BSS_OPERATIONAL_MODE_AP)
#ifdef QCA_IBSS_SUPPORT
|| (add_bss->operMode == BSS_OPERATIONAL_MODE_IBSS)
#endif
) {
wma->interfaces[resp_event->vdev_id].beacon =
vos_mem_malloc(sizeof(struct beacon_info));
bcn = wma->interfaces[resp_event->vdev_id].beacon;
if (!bcn) {
WMA_LOGE("%s: Failed alloc memory for beacon struct",
__func__);
add_bss->status = VOS_STATUS_E_FAILURE;
goto send_fail_resp;
}
vos_mem_zero(bcn, sizeof(*bcn));
bcn->buf = adf_nbuf_alloc(NULL, WMA_BCN_BUF_MAX_SIZE, 0,
sizeof(u_int32_t), 0);
if (!bcn->buf) {
WMA_LOGE("%s: No memory allocated for beacon buffer",
__func__);
vos_mem_free(bcn);
add_bss->status = VOS_STATUS_E_FAILURE;
goto send_fail_resp;
}
bcn->seq_no = MIN_SW_SEQ;
adf_os_spinlock_init(&bcn->lock);
adf_os_atomic_set(&wma->interfaces[resp_event->vdev_id].bss_status,
WMA_BSS_STATUS_STARTED);
WMA_LOGD("%s: AP mode (type %d subtype %d) BSS is started", __func__,
wma->interfaces[resp_event->vdev_id].type,
wma->interfaces[resp_event->vdev_id].sub_type);
WMA_LOGD("%s: Allocated beacon struct %pK, template memory %pK",
__func__, bcn, bcn->buf);
}
add_bss->status = VOS_STATUS_SUCCESS;
add_bss->bssIdx = resp_event->vdev_id;
add_bss->chainMask = resp_event->chain_mask;
add_bss->smpsMode = host_map_smps_mode(resp_event->smps_mode);
send_fail_resp:
WMA_LOGD("%s: Sending add bss rsp to umac(vdev %d status %d)",
__func__, resp_event->vdev_id, add_bss->status);
wma_send_msg(wma, WDA_ADD_BSS_RSP, (void *)add_bss, 0);
}
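/**
 * wma_vdev_start_resp_handler() - WMI_VDEV_START_RESP_EVENTID handler
 * @handle: WMA handle
 * @cmd_param_info: event buffer
 * @len: length of @cmd_param_info
 *
 * Copies the event fixed param and posts it to the WDA message queue
 * as WDA_VDEV_START_RSP_IND for processing in the MC thread context.
 *
 * Return: 0 on success, negative value on failure
 */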
static int wma_vdev_start_resp_handler(void *handle, u_int8_t *cmd_param_info,
u_int32_t len)
{
WMI_VDEV_START_RESP_EVENTID_param_tlvs *param_buf;
wmi_vdev_start_response_event_fixed_param *resp_event;
u_int8_t *buf;
vos_msg_t vos_msg = {0};
tp_wma_handle wma = (tp_wma_handle) handle;
ol_txrx_pdev_handle pdev = NULL;
WMA_LOGI("%s: Enter", __func__);
param_buf = (WMI_VDEV_START_RESP_EVENTID_param_tlvs *) cmd_param_info;
if (!param_buf) {
WMA_LOGE("Invalid start response event buffer");
return -EINVAL;
}
pdev = vos_get_context(VOS_MODULE_ID_TXRX, wma->vos_context);
if (pdev == NULL) {
WMA_LOGE("vdev start resp fail as pdev is NULL");
return -EINVAL;
}
resp_event = param_buf->fixed_param;
buf = vos_mem_malloc(sizeof(wmi_vdev_start_response_event_fixed_param));
if (!buf) {
WMA_LOGE("%s: Failed alloc memory for buf", __func__);
return -EINVAL;
}
if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id)) {
adf_os_spin_lock_bh(&wma->dfs_ic->chan_lock);
wma->dfs_ic->disable_phy_err_processing = false;
adf_os_spin_unlock_bh(&wma->dfs_ic->chan_lock);
}
if (wma->pause_other_vdev_on_mcc_start) {
WMA_LOGD("%s: unpause other vdevs since paused when MCC start", __func__);
wma->pause_other_vdev_on_mcc_start = false;
wdi_in_pdev_unpause_other_vdev(pdev,
OL_TXQ_PAUSE_REASON_MCC_VDEV_START,
resp_event->vdev_id);
}
vos_mem_zero(buf, sizeof(wmi_vdev_start_response_event_fixed_param));
vos_mem_copy(buf, (u_int8_t *)resp_event,
sizeof(wmi_vdev_start_response_event_fixed_param));
vos_msg.type = WDA_VDEV_START_RSP_IND;
vos_msg.bodyptr = buf;
vos_msg.bodyval = 0;
if (VOS_STATUS_SUCCESS !=
vos_mq_post_message(VOS_MQ_ID_WDA, &vos_msg)) {
WMA_LOGP("%s: Failed to post WDA_VDEV_START_RSP_IND msg", __func__);
vos_mem_free(buf);
return -1;
}
WMA_LOGD("WDA_VDEV_START_RSP_IND posted");
return 0;
}
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
/**
* wma_find_mcc_ap() - finds if device is operating AP in MCC mode or not
* @wma: wma handle.
* @vdev_id: vdev ID of device for which MCC has to be checked
* @add: flag indicating if current device is added or deleted
*
* This function parses through all the interfaces in wma and finds if
 * any of those devices are in MCC mode with AP. If such a vdev is found,
 * the involved AP vdevs are sent a WDA_UPDATE_Q2Q_IE_IND msg to update their
* beacon template to include Q2Q IE.
*
* Return: void
*/
void wma_find_mcc_ap(tp_wma_handle wma,
uint8_t vdev_id,
bool add)
{
uint8_t i;
uint16_t prev_ch_freq = 0;
bool is_ap = false;
bool result = false;
uint8_t * ap_vdev_ids = NULL;
uint8_t num_ch = 0;
ap_vdev_ids = vos_mem_malloc(wma->max_bssid);
if (!ap_vdev_ids) {
return;
}
for(i = 0; i < wma->max_bssid; i++) {
ap_vdev_ids[i] = -1;
if( add == false && i == vdev_id)
continue;
if( wma->interfaces[i].vdev_up || (i == vdev_id && add) ) {
if(wma->interfaces[i].type == WMI_VDEV_TYPE_AP) {
is_ap = true;
ap_vdev_ids[i] = i;
}
if(wma->interfaces[i].mhz != prev_ch_freq) {
num_ch++;
prev_ch_freq = wma->interfaces[i].mhz;
}
}
}
if( is_ap && (num_ch > 1) )
result = true;
else
result = false;
wma_send_msg(wma, WDA_UPDATE_Q2Q_IE_IND, (void*)ap_vdev_ids, result);
}
#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
static const wmi_channel_width mode_to_width[MODE_MAX] =
{
[MODE_11A] = WMI_CHAN_WIDTH_20,
[MODE_11G] = WMI_CHAN_WIDTH_20,
[MODE_11B] = WMI_CHAN_WIDTH_20,
[MODE_11GONLY] = WMI_CHAN_WIDTH_20,
[MODE_11NA_HT20] = WMI_CHAN_WIDTH_20,
[MODE_11NG_HT20] = WMI_CHAN_WIDTH_20,
[MODE_11AC_VHT20] = WMI_CHAN_WIDTH_20,
[MODE_11AC_VHT20_2G] = WMI_CHAN_WIDTH_20,
[MODE_11NA_HT40] = WMI_CHAN_WIDTH_40,
[MODE_11NG_HT40] = WMI_CHAN_WIDTH_40,
[MODE_11AC_VHT40] = WMI_CHAN_WIDTH_40,
[MODE_11AC_VHT40_2G] = WMI_CHAN_WIDTH_40,
[MODE_11AC_VHT80] = WMI_CHAN_WIDTH_80,
#if CONFIG_160MHZ_SUPPORT
[MODE_11AC_VHT80_80] = WMI_CHAN_WIDTH_80P80,
[MODE_11AC_VHT160] = WMI_CHAN_WIDTH_160,
#endif
};
/**
* chanmode_to_chanwidth() - get channel width through channel mode
* @chanmode: channel phy mode
*
* Return: channel width
*/
static wmi_channel_width chanmode_to_chanwidth(WLAN_PHY_MODE chanmode)
{
wmi_channel_width chan_width;
if (chanmode >= MODE_11A && chanmode < MODE_MAX)
chan_width = mode_to_width[chanmode];
else
chan_width = WMI_CHAN_WIDTH_20;
return chan_width;
}
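/**
 * wma_vdev_start_rsp_ind() - process WDA_VDEV_START_RSP_IND in the MC thread
 * @wma: WMA handle
 * @buf: copy of the vdev start response fixed param
 *
 * Completes the pending vdev start/restart request: handles the hidden
 * SSID restart case, channel switch, add BSS and OCB config responses,
 * and brings the vdev up where required.
 *
 * Return: 0 on success, negative value on failure
 */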
static int wma_vdev_start_rsp_ind(tp_wma_handle wma, u_int8_t *buf)
{
struct wma_target_req *req_msg;
struct wma_txrx_node *iface;
int err;
wmi_channel_width chanwidth;
wmi_vdev_start_response_event_fixed_param *resp_event;
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
tpAniSirGlobal mac_ctx = (tpAniSirGlobal)vos_get_context(
VOS_MODULE_ID_PE,
wma->vos_context);
if (NULL == mac_ctx) {
WMA_LOGE("%s: Failed to get mac_ctx", __func__);
return -EINVAL;
}
#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
resp_event = (wmi_vdev_start_response_event_fixed_param *)buf;
if (!resp_event) {
WMA_LOGE("Invalid start response event buffer");
return -EINVAL;
}
if (resp_event->vdev_id >= wma->max_bssid) {
WMA_LOGE("%s: received invalid vdev_id %d",
__func__, resp_event->vdev_id);
return -EINVAL;
}
iface = &wma->interfaces[resp_event->vdev_id];
if ((resp_event->vdev_id < wma->max_bssid) &&
(adf_os_atomic_read(
&wma->interfaces[resp_event->vdev_id].vdev_restart_params.hidden_ssid_restart_in_progress)) &&
(wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == true)) {
WMA_LOGE(
"%s: vdev restart event recevied for hidden ssid set using IOCTL",
__func__);
if (wmi_unified_vdev_up_send(wma->wmi_handle, resp_event->vdev_id, 0,
wma->interfaces[resp_event->vdev_id].bssid) < 0) {
WMA_LOGE("%s : failed to send vdev up", __func__);
return -EEXIST;
}
adf_os_atomic_set(
&wma->interfaces[resp_event->vdev_id].vdev_restart_params.hidden_ssid_restart_in_progress, 0);
wma->interfaces[resp_event->vdev_id].vdev_up = TRUE;
/*
* Unpause TX queue in SAP case while configuring hidden ssid
* enable or disable, else the data path is paused forever
* causing data packets(starting from DHCP offer) to get stuck
*/
wdi_in_vdev_unpause(iface->handle,
OL_TXQ_PAUSE_REASON_VDEV_STOP);
iface->pause_bitmap &= ~(1 << PAUSE_TYPE_HOST);
}
req_msg = wma_find_vdev_req(wma, resp_event->vdev_id,
WMA_TARGET_REQ_TYPE_VDEV_START);
if (!req_msg) {
WMA_LOGE("%s: Failed to lookup request message for vdev %d",
__func__, resp_event->vdev_id);
return -EINVAL;
}
vos_timer_stop(&req_msg->event_timeout);
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
if (resp_event->status == VOS_STATUS_SUCCESS
&& mac_ctx->sap.sap_channel_avoidance)
wma_find_mcc_ap(wma, resp_event->vdev_id, true);
#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
if (req_msg->msg_type == WDA_CHNL_SWITCH_REQ) {
tpSwitchChannelParams params =
(tpSwitchChannelParams) req_msg->user_data;
if(!params) {
WMA_LOGE("%s: channel switch params is NULL for vdev %d",
__func__, resp_event->vdev_id);
return -EINVAL;
}
WMA_LOGD("%s: Send channel switch resp vdev %d status %d",
__func__, resp_event->vdev_id, resp_event->status);
params->chainMask = resp_event->chain_mask;
params->smpsMode = host_map_smps_mode(resp_event->smps_mode);
params->status = resp_event->status;
if (wma->interfaces[resp_event->vdev_id].is_channel_switch)
wma->interfaces[resp_event->vdev_id].is_channel_switch =
VOS_FALSE;
if (((resp_event->resp_type == WMI_VDEV_RESTART_RESP_EVENT) &&
(iface->type == WMI_VDEV_TYPE_STA)) ||
((resp_event->resp_type == WMI_VDEV_START_RESP_EVENT) &&
(iface->type == WMI_VDEV_TYPE_MONITOR))) {
err = wma_set_peer_param(wma, iface->bssid,
WMI_PEER_PHYMODE, iface->chanmode,
resp_event->vdev_id);
WMA_LOGD("%s:vdev_id %d chanmode %d status %d",
__func__, resp_event->vdev_id,
iface->chanmode, err);
chanwidth = chanmode_to_chanwidth(iface->chanmode);
err = wma_set_peer_param(wma, iface->bssid,
WMI_PEER_CHWIDTH, chanwidth,
resp_event->vdev_id);
WMA_LOGD("%s:vdev_id %d chanwidth %d status %d",
__func__, resp_event->vdev_id,
chanwidth, err);
if (wmi_unified_vdev_up_send(wma->wmi_handle,
resp_event->vdev_id, iface->aid,
iface->bssid)) {
WMA_LOGE("%s:vdev_up failed vdev_id %d",
__func__, resp_event->vdev_id);
wma->interfaces[resp_event->vdev_id].vdev_up =
FALSE;
} else {
wma->interfaces[resp_event->vdev_id].vdev_up =
TRUE;
}
}
wma_send_msg(wma, WDA_SWITCH_CHANNEL_RSP, (void *)params, 0);
} else if (req_msg->msg_type == WDA_ADD_BSS_REQ) {
tpAddBssParams bssParams = (tpAddBssParams) req_msg->user_data;
vos_mem_copy(iface->bssid, bssParams->bssId, ETH_ALEN);
wma_vdev_start_rsp(wma, bssParams, resp_event);
} else if (req_msg->msg_type == WDA_OCB_SET_CONFIG_CMD) {
if (wmi_unified_vdev_up_send(wma->wmi_handle,
resp_event->vdev_id, iface->aid,
iface->bssid) < 0) {
WMA_LOGE(FL("failed to send vdev up"));
return -EEXIST;
}
iface->vdev_up = TRUE;
wma_ocb_start_resp_ind_cont(wma);
}
if ((wma->interfaces[resp_event->vdev_id].type == WMI_VDEV_TYPE_AP) &&
wma->interfaces[resp_event->vdev_id].vdev_up)
wma_set_sap_keepalive(wma, resp_event->vdev_id);
vos_timer_destroy(&req_msg->event_timeout);
adf_os_mem_free(req_msg);
return 0;
}
#define BIG_ENDIAN_MAX_DEBUG_BUF 500
/* function : wma_unified_debug_print_event_handler
 * Description : Handles WMI_DEBUG_PRINT_EVENTID and prints the firmware
 * debug string to the host log.
 * Args : @handle - wma handle, @datap - event buffer, @len - buffer length
 * Returns : 0 on success, negative value on failure
 */
static int wma_unified_debug_print_event_handler(void *handle, u_int8_t *datap,
u_int32_t len)
{
WMI_DEBUG_PRINT_EVENTID_param_tlvs *param_buf;
u_int8_t *data;
u_int32_t datalen;
param_buf = (WMI_DEBUG_PRINT_EVENTID_param_tlvs *)datap;
if (!param_buf) {
WMA_LOGE("Get NULL point message from FW");
return -ENOMEM;
}
data = param_buf->data;
datalen = param_buf->num_data;
#ifdef BIG_ENDIAN_HOST
{
if (datalen > BIG_ENDIAN_MAX_DEBUG_BUF) {
WMA_LOGE("%s Invalid data len %d, limiting to max",
__func__, datalen);
datalen = BIG_ENDIAN_MAX_DEBUG_BUF;
}
char dbgbuf[BIG_ENDIAN_MAX_DEBUG_BUF] = { 0 };
memcpy(dbgbuf, data, datalen);
SWAPME(dbgbuf, datalen);
WMA_LOGD("FIRMWARE:%s", dbgbuf);
return 0;
}
#else
WMA_LOGD("FIRMWARE:%s", data);
return 0;
#endif
}
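/**
 * wmi_unified_vdev_set_param_send() - send WMI_VDEV_SET_PARAM_CMDID
 * @wmi_handle: WMI handle
 * @if_id: vdev ID
 * @param_id: vdev parameter ID
 * @param_value: value to set
 *
 * Return: 0 on success, negative value on failure
 */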
int wmi_unified_vdev_set_param_send(wmi_unified_t wmi_handle, u_int32_t if_id,
u_int32_t param_id, u_int32_t param_value)
{
int ret;
wmi_vdev_set_param_cmd_fixed_param *cmd;
wmi_buf_t buf;
u_int16_t len = sizeof(*cmd);
buf = wmi_buf_alloc(wmi_handle, len);
if (!buf) {
WMA_LOGE("%s:wmi_buf_alloc failed", __func__);
return -ENOMEM;
}
cmd = (wmi_vdev_set_param_cmd_fixed_param *) wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_vdev_set_param_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_vdev_set_param_cmd_fixed_param));
cmd->vdev_id = if_id;
cmd->param_id = param_id;
cmd->param_value = param_value;
WMA_LOGD("Setting vdev %d param = %x, value = %u",
if_id, param_id, param_value);
ret = wmi_unified_cmd_send(wmi_handle, buf, len,
WMI_VDEV_SET_PARAM_CMDID);
if (ret < 0) {
WMA_LOGE("Failed to send set param command ret = %d", ret);
wmi_buf_free(buf);
}
return ret;
}
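/**
 * wma_roam_scan_bmiss_cnt() - set the first and final beacon miss counts
 * @wma_handle: WMA handle
 * @first_bcnt: value for WMI_VDEV_PARAM_BMISS_FIRST_BCNT
 * @final_bcnt: value for WMI_VDEV_PARAM_BMISS_FINAL_BCNT
 * @vdev_id: vdev ID
 *
 * Return: VOS_STATUS_SUCCESS on success, VOS_STATUS_E_FAILURE otherwise
 */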
VOS_STATUS wma_roam_scan_bmiss_cnt(tp_wma_handle wma_handle,
A_INT32 first_bcnt,
A_UINT32 final_bcnt,
u_int32_t vdev_id)
{
int status = 0;
WMA_LOGI("%s: first_bcnt=%d, final_bcnt=%d", __func__,
first_bcnt, final_bcnt);
status = wmi_unified_vdev_set_param_send(wma_handle->wmi_handle,
vdev_id,
WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
first_bcnt);
if (status != EOK) {
WMA_LOGE("wmi_unified_vdev_set_param_send"
"WMI_VDEV_PARAM_BMISS_FIRST_BCNT returned Error %d",status);
return VOS_STATUS_E_FAILURE;
}
status = wmi_unified_vdev_set_param_send(wma_handle->wmi_handle,
vdev_id,
WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
final_bcnt);
if (status != EOK) {
WMA_LOGE("wmi_unified_vdev_set_param_send"
"WMI_VDEV_PARAM_BMISS_FINAL_BCNT returned Error %d",status);
return VOS_STATUS_E_FAILURE;
}
return VOS_STATUS_SUCCESS;
}
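/**
 * wma_set_default_tgt_config() - build the default WMI resource config
 * @wma_handle: WMA handle
 *
 * Fills a default wmi_resource_config, adjusts the number of peers and
 * TIDs for the target and the rx decap mode for the current mode of
 * operation, and stores the result in wma_handle->wlan_resource_config.
 */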
static v_VOID_t wma_set_default_tgt_config(tp_wma_handle wma_handle)
{
struct ol_softc *scn;
u_int8_t no_of_peers_supported;
wmi_resource_config tgt_cfg = {
0, /* Filling zero for TLV Tag and Length fields */
CFG_TGT_NUM_VDEV,
CFG_TGT_NUM_PEERS + CFG_TGT_NUM_VDEV + 2,
CFG_TGT_NUM_OFFLOAD_PEERS,
CFG_TGT_NUM_OFFLOAD_REORDER_BUFFS,
CFG_TGT_NUM_PEER_KEYS,
CFG_TGT_NUM_TIDS,
CFG_TGT_AST_SKID_LIMIT,
CFG_TGT_DEFAULT_TX_CHAIN_MASK,
CFG_TGT_DEFAULT_RX_CHAIN_MASK,
{ CFG_TGT_RX_TIMEOUT_LO_PRI, CFG_TGT_RX_TIMEOUT_LO_PRI, CFG_TGT_RX_TIMEOUT_LO_PRI, CFG_TGT_RX_TIMEOUT_HI_PRI },
CFG_TGT_RX_DECAP_MODE,
CFG_TGT_DEFAULT_SCAN_MAX_REQS,
CFG_TGT_DEFAULT_BMISS_OFFLOAD_MAX_VDEV,
CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_VDEV,
CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_PROFILES,
CFG_TGT_DEFAULT_NUM_MCAST_GROUPS,
CFG_TGT_DEFAULT_NUM_MCAST_TABLE_ELEMS,
CFG_TGT_DEFAULT_MCAST2UCAST_MODE,
CFG_TGT_DEFAULT_TX_DBG_LOG_SIZE,
CFG_TGT_WDS_ENTRIES,
CFG_TGT_DEFAULT_DMA_BURST_SIZE,
CFG_TGT_DEFAULT_MAC_AGGR_DELIM,
CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK,
CFG_TGT_DEFAULT_VOW_CONFIG,
CFG_TGT_DEFAULT_GTK_OFFLOAD_MAX_VDEV,
CFG_TGT_NUM_MSDU_DESC,
CFG_TGT_MAX_FRAG_TABLE_ENTRIES,
CFG_TGT_NUM_TDLS_VDEVS,
CFG_TGT_NUM_TDLS_CONN_TABLE_ENTRIES,
CFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV,
CFG_TGT_MAX_MULTICAST_FILTER_ENTRIES,
0,
0,
0,
CFG_TGT_NUM_TDLS_CONC_SLEEP_STAS,
CFG_TGT_NUM_TDLS_CONC_BUFFER_STAS,
0,
CFG_TGT_NUM_OCB_VDEVS,
CFG_TGT_NUM_OCB_CHANNELS,
CFG_TGT_NUM_OCB_SCHEDULES,
};
/* Update the max number of peers */
scn = vos_get_context(VOS_MODULE_ID_HIF, wma_handle->vos_context);
if (!scn) {
WMA_LOGE("%s: vos_context is NULL", __func__);
return;
}
no_of_peers_supported = ol_get_number_of_peers_supported(scn);
tgt_cfg.num_peers = no_of_peers_supported + CFG_TGT_NUM_VDEV + 2;
#if defined(CONFIG_HL_SUPPORT)
tgt_cfg.num_tids = 4 * no_of_peers_supported;
#else
tgt_cfg.num_tids = (2 * (no_of_peers_supported + CFG_TGT_NUM_VDEV + 2));
#endif
WMITLV_SET_HDR(&tgt_cfg.tlv_header,WMITLV_TAG_STRUC_wmi_resource_config,
WMITLV_GET_STRUCT_TLVLEN(wmi_resource_config));
/* reduce the peer/vdev if CFG_TGT_NUM_MSDU_DESC exceeds 1000 */
#ifdef PERE_IP_HDR_ALIGNMENT_WAR
if (scn->host_80211_enable) {
/*
* To make the IP header begins at dword aligned address,
* we make the decapsulation mode as Native Wifi.
*/
tgt_cfg.rx_decap_mode = CFG_TGT_RX_DECAP_MODE_NWIFI;
}
#endif
if (VOS_MONITOR_MODE == vos_get_conparam())
tgt_cfg.rx_decap_mode = CFG_TGT_RX_DECAP_MODE_RAW;
wma_handle->wlan_resource_config = tgt_cfg;
}
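/**
 * wmi_unified_peer_delete_send() - send WMI_PEER_DELETE_CMDID to firmware
 * @wmi: WMI handle
 * @peer_addr: MAC address of the peer to delete
 * @vdev_id: vdev ID the peer belongs to
 *
 * Return: 0 on success, negative value on failure
 */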
static int32_t wmi_unified_peer_delete_send(wmi_unified_t wmi,
u_int8_t peer_addr[IEEE80211_ADDR_LEN],
u_int8_t vdev_id)
{
wmi_peer_delete_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len = sizeof(*cmd);
buf = wmi_buf_alloc(wmi, len);
if (!buf) {
WMA_LOGP("%s: wmi_buf_alloc failed", __func__);
return -ENOMEM;
}
cmd = (wmi_peer_delete_cmd_fixed_param *) wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_peer_delete_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_peer_delete_cmd_fixed_param));
WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr);
cmd->vdev_id = vdev_id;
if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_DELETE_CMDID)) {
WMA_LOGP("%s: Failed to send peer delete command", __func__);
wmi_buf_free(buf);
return -EIO;
}
WMA_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id);
return 0;
}
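/**
 * wmi_unified_peer_flush_tids_send() - send WMI_PEER_FLUSH_TIDS_CMDID
 * @wmi: WMI handle
 * @peer_addr: MAC address of the peer
 * @peer_tid_bitmap: bitmap of TIDs to flush
 * @vdev_id: vdev ID the peer belongs to
 *
 * Return: 0 on success, negative value on failure
 */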
static int32_t wmi_unified_peer_flush_tids_send(wmi_unified_t wmi,
u_int8_t peer_addr
[IEEE80211_ADDR_LEN],
u_int32_t peer_tid_bitmap,
u_int8_t vdev_id)
{
wmi_peer_flush_tids_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len = sizeof(*cmd);
buf = wmi_buf_alloc(wmi, len);
if (!buf) {
WMA_LOGP("%s: wmi_buf_alloc failed", __func__);
return -ENOMEM;
}
cmd = (wmi_peer_flush_tids_cmd_fixed_param *) wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_peer_flush_tids_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_peer_flush_tids_cmd_fixed_param));
WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr);
cmd->peer_tid_bitmap = peer_tid_bitmap;
cmd->vdev_id = vdev_id;
if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_FLUSH_TIDS_CMDID)) {
WMA_LOGP("%s: Failed to send flush tid command", __func__);
wmi_buf_free(buf);
return -EIO;
}
WMA_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id);
return 0;
}
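/**
 * wma_remove_peer() - remove a peer from the firmware and the txrx layer
 * @wma: WMA handle
 * @bssid: MAC address used to identify the peer
 * @vdev_id: vdev ID the peer belongs to
 * @peer: txrx peer handle, may be NULL
 * @roam_synch_in_progress: when true (LFR3 roam sync), skip the WMI flush
 *                          and delete commands and only detach locally
 */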
void wma_remove_peer(tp_wma_handle wma, u_int8_t *bssid,
u_int8_t vdev_id, ol_txrx_peer_handle peer,
v_BOOL_t roam_synch_in_progress)
{
#define PEER_ALL_TID_BITMASK 0xffffffff
u_int32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
u_int8_t *peer_addr = bssid;
if (!wma->interfaces[vdev_id].peer_count)
{
WMA_LOGE("%s: Can't remove peer with peer_addr %pM vdevid %d peer_count %d",
__func__, bssid, vdev_id, wma->interfaces[vdev_id].peer_count);
return;
}
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
if (roam_synch_in_progress) {
WMA_LOGE("%s:LFR3:Removing peer with addr %pM vdevid %d peer_cnt %d",
__func__, bssid, vdev_id, wma->interfaces[vdev_id].peer_count);
goto peer_detach;
} else {
WMA_LOGI("%s: Removing peer with addr %pM vdevid %d peer_count %d",
__func__, bssid, vdev_id, wma->interfaces[vdev_id].peer_count);
}
#endif
/* Flush all TIDs except MGMT TID for this peer in Target */
peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
wmi_unified_peer_flush_tids_send(wma->wmi_handle, bssid,
peer_tid_bitmap, vdev_id);
#if defined(QCA_IBSS_SUPPORT)
if ((peer) && (wma_is_vdev_in_ibss_mode(wma, vdev_id))) {
WMA_LOGD("%s: bssid %pM peer->mac_addr %pM", __func__,
bssid, peer->mac_addr.raw);
peer_addr = peer->mac_addr.raw;
}
#endif
wmi_unified_peer_delete_send(wma->wmi_handle, peer_addr, vdev_id);
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
peer_detach:
#endif
if (peer)
ol_txrx_peer_detach(peer);
wma->interfaces[vdev_id].peer_count--;
#undef PEER_ALL_TID_BITMASK
}
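/**
 * wma_peer_sta_kickout_event_handler() - WMI_PEER_STA_KICKOUT_EVENTID handler
 * @handle: WMA handle
 * @event: event buffer
 * @len: length of @event
 *
 * Depending on the kickout reason, posts an IBSS peer inactivity
 * indication, a TDLS delete station context, a beacon miss, or the
 * default delete station context indication to LIM.
 *
 * Return: 0 on success, negative value on failure
 */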
static int wma_peer_sta_kickout_event_handler(void *handle, u8 *event, u32 len)
{
tp_wma_handle wma = (tp_wma_handle)handle;
WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL;
wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL;
u_int8_t vdev_id, peer_id, macaddr[IEEE80211_ADDR_LEN];
ol_txrx_peer_handle peer;
ol_txrx_pdev_handle pdev;
tpDeleteStaContext del_sta_ctx;
tpSirIbssPeerInactivityInd p_inactivity;
WMA_LOGD("%s: Enter", __func__);
param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event;
kickout_event = param_buf->fixed_param;
pdev = vos_get_context(VOS_MODULE_ID_TXRX, wma->vos_context);
if (!pdev) {
WMA_LOGE("%s: pdev is NULL", __func__);
return -EINVAL;
}
WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr);
peer = ol_txrx_find_peer_by_addr(pdev, macaddr, &peer_id);
if (!peer) {
WMA_LOGE("PEER [%pM] not found", macaddr);
return -EINVAL;
}
if (tl_shim_get_vdevid(peer, &vdev_id) != VOS_STATUS_SUCCESS) {
WMA_LOGE("Not able to find BSSID for peer [%pM]", macaddr);
return -EINVAL;
}
WMA_LOGA("%s: PEER:[%pM], ADDR:[%pN], INTERFACE:%d, peer_id:%d, reason:%d",
__func__, macaddr,
wma->interfaces[vdev_id].addr, vdev_id,
peer_id, kickout_event->reason);
switch (kickout_event->reason) {
case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT:
p_inactivity = (tpSirIbssPeerInactivityInd)
vos_mem_malloc(sizeof(tSirIbssPeerInactivityInd));
if (!p_inactivity) {
WMA_LOGE("VOS MEM Alloc Failed for tSirIbssPeerInactivity");
return -EINVAL;
}
p_inactivity->staIdx = peer_id;
vos_mem_copy(p_inactivity->peerAddr, macaddr, IEEE80211_ADDR_LEN);
wma_send_msg(wma, WDA_IBSS_PEER_INACTIVITY_IND, (void *)p_inactivity, 0);
goto exit_handler;
break;
#ifdef FEATURE_WLAN_TDLS
case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT:
del_sta_ctx =
(tpDeleteStaContext)vos_mem_malloc(sizeof(tDeleteStaContext));
if (!del_sta_ctx) {
WMA_LOGE("%s: mem alloc failed for tDeleteStaContext for TDLS peer: %pM",
__func__, macaddr);
return -EINVAL;
}
del_sta_ctx->is_tdls = true;
del_sta_ctx->vdev_id = vdev_id;
del_sta_ctx->staId = peer_id;
vos_mem_copy(del_sta_ctx->addr2, macaddr, IEEE80211_ADDR_LEN);
vos_mem_copy(del_sta_ctx->bssId, wma->interfaces[vdev_id].bssid,
IEEE80211_ADDR_LEN);
del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx,
0);
goto exit_handler;
break;
#endif /* FEATURE_WLAN_TDLS */
case WMI_PEER_STA_KICKOUT_REASON_XRETRY:
if(wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
(wma->interfaces[vdev_id].sub_type == 0 ||
wma->interfaces[vdev_id].sub_type ==
WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
vos_mem_compare(wma->interfaces[vdev_id].bssid,
macaddr, ETH_ALEN)) {
/*
* KICKOUT event is for current station-AP connection.
* Treat it like final beacon miss. Station may not have
* missed beacons but not able to transmit frames to AP
* for a long time. Must disconnect to get out of
* this sticky situation.
* In future implementation, roaming module will also
* handle this event and perform a scan.
*/
WMA_LOGW("%s: WMI_PEER_STA_KICKOUT_REASON_XRETRY event for STA",
__func__);
wma_beacon_miss_handler(wma, vdev_id, kickout_event->rssi);
goto exit_handler;
}
break;
case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED:
/*
* Default legacy value used by original firmware implementation.
*/
if(wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
(wma->interfaces[vdev_id].sub_type == 0 ||
wma->interfaces[vdev_id].sub_type ==
WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
vos_mem_compare(wma->interfaces[vdev_id].bssid,
macaddr, ETH_ALEN)) {
/*
* KICKOUT event is for current station-AP connection.
* Treat it like final beacon miss. Station may not have
* missed beacons but not able to transmit frames to AP
* for a long time. Must disconnect to get out of
* this sticky situation.
* In future implementation, roaming module will also
* handle this event and perform a scan.
*/
WMA_LOGW("%s: WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA",
__func__);
wma_beacon_miss_handler(wma, vdev_id, kickout_event->rssi);
goto exit_handler;
}
break;
case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
/* This could be for STA or SAP role */
default:
break;
}
/*
* default action is to send delete station context indication to LIM
*/
del_sta_ctx = (tpDeleteStaContext)vos_mem_malloc(sizeof(tDeleteStaContext));
if (!del_sta_ctx) {
WMA_LOGE("VOS MEM Alloc Failed for tDeleteStaContext");
return -EINVAL;
}
del_sta_ctx->is_tdls = false;
del_sta_ctx->vdev_id = vdev_id;
del_sta_ctx->staId = peer_id;
vos_mem_copy(del_sta_ctx->addr2, macaddr, IEEE80211_ADDR_LEN);
vos_mem_copy(del_sta_ctx->bssId, wma->interfaces[vdev_id].addr,
IEEE80211_ADDR_LEN);
del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
del_sta_ctx->rssi = kickout_event->rssi + WMA_TGT_NOISE_FLOOR_DBM;
wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx, 0);
wma_lost_link_info_handler(wma, vdev_id, kickout_event->rssi +
WMA_TGT_NOISE_FLOOR_DBM);
exit_handler:
WMA_LOGD("%s: Exit", __func__);
return 0;
}
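/**
 * wmi_unified_vdev_down_send() - send WMI_VDEV_DOWN_CMDID to firmware
 * @wmi: WMI handle
 * @vdev_id: vdev ID to bring down
 *
 * Return: 0 on success, negative value on failure
 */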
static int wmi_unified_vdev_down_send(wmi_unified_t wmi, u_int8_t vdev_id)
{
wmi_vdev_down_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len = sizeof(*cmd);
buf = wmi_buf_alloc(wmi, len);
if (!buf) {
WMA_LOGP("%s : wmi_buf_alloc failed", __func__);
return -ENOMEM;
}
cmd = (wmi_vdev_down_cmd_fixed_param *) wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_vdev_down_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_down_cmd_fixed_param));
cmd->vdev_id = vdev_id;
if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_DOWN_CMDID)) {
WMA_LOGP("%s: Failed to send vdev down", __func__);
wmi_buf_free(buf);
return -EIO;
}
WMA_LOGD("%s: vdev_id %d", __func__, vdev_id);
return 0;
}
#ifdef QCA_IBSS_SUPPORT
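/**
 * wma_delete_all_ibss_peers() - remove all remote peers of an IBSS vdev
 * @wma: WMA handle
 * @vdev_id: vdev ID of the IBSS vdev
 *
 * Walks the vdev peer list in reverse, removing every remote peer and
 * finally the IBSS bss peer; the self peer is left for the caller.
 */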
static void wma_delete_all_ibss_peers(tp_wma_handle wma, A_UINT32 vdev_id)
{
ol_txrx_vdev_handle vdev;
ol_txrx_peer_handle peer, temp;
if (!wma || vdev_id >= wma->max_bssid)
return;
vdev = wma->interfaces[vdev_id].handle;
if (!vdev)
return;
/* remove all remote peers of IBSS */
adf_os_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
temp = NULL;
TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t, peer_list_elem) {
if (temp) {
adf_os_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
if (adf_os_atomic_read(&temp->delete_in_progress) == 0){
wma_remove_peer(wma, temp->mac_addr.raw,
vdev_id, temp, VOS_FALSE);
}
adf_os_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
}
/* self peer is deleted last */
if (peer == TAILQ_FIRST(&vdev->peer_list)) {
WMA_LOGE("%s: self peer removed by caller ", __func__);
break;
} else
temp = peer;
}
adf_os_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
/* remove IBSS bss peer last */
peer = TAILQ_FIRST(&vdev->peer_list);
wma_remove_peer(wma, wma->interfaces[vdev_id].bssid, vdev_id, peer,
VOS_FALSE);
}
#endif //#ifdef QCA_IBSS_SUPPORT
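/**
 * wma_delete_all_ap_remote_peers() - remove all remote peers of a SAP vdev
 * @wma: WMA handle
 * @vdev_id: vdev ID of the SAP vdev
 *
 * Walks the vdev peer list in reverse, removing every remote peer;
 * the self peer is removed by the caller.
 */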
static void wma_delete_all_ap_remote_peers(tp_wma_handle wma, A_UINT32 vdev_id)
{
ol_txrx_vdev_handle vdev;
ol_txrx_peer_handle peer, temp;
if (!wma || vdev_id >= wma->max_bssid)
return;
vdev = wma->interfaces[vdev_id].handle;
if (!vdev)
return;
WMA_LOGE("%s: vdev_id - %d", __func__, vdev_id);
/* remove all remote peers of SAP */
adf_os_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
temp = NULL;
TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t, peer_list_elem) {
if (temp) {
adf_os_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
if (adf_os_atomic_read(&temp->delete_in_progress) == 0){
wma_remove_peer(wma, temp->mac_addr.raw,
vdev_id, temp, VOS_FALSE);
}
adf_os_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
}
/* self peer is deleted by caller */
if (peer == TAILQ_FIRST(&vdev->peer_list)){
WMA_LOGE("%s: self peer removed by caller ", __func__);
break;
} else
temp = peer;
}
adf_os_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
}
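/**
* wma_vdev_stop_resp_handler() - WMI_VDEV_STOPPED_EVENTID handler
* @handle: wma handle
* @cmd_param_info: event buffer from firmware
* @len: length of the event buffer
*
* Copies the fixed param of the vdev stopped event and posts it to the
* WDA message queue as WDA_VDEV_STOP_IND for processing in MC thread
* context.
*
* Return: 0 on success, negative value on failure
*/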
static int wma_vdev_stop_resp_handler(void *handle, u_int8_t *cmd_param_info,
u32 len)
{
WMI_VDEV_STOPPED_EVENTID_param_tlvs *param_buf;
wmi_vdev_stopped_event_fixed_param *event;
u_int8_t *buf;
vos_msg_t vos_msg = {0};
WMA_LOGI("%s: Enter", __func__);
param_buf = (WMI_VDEV_STOPPED_EVENTID_param_tlvs *) cmd_param_info;
if (!param_buf) {
WMA_LOGE("Invalid event buffer");
return -EINVAL;
}
event = param_buf->fixed_param;
buf = vos_mem_malloc(sizeof(wmi_vdev_stopped_event_fixed_param));
if (!buf) {
WMA_LOGE("%s: Failed alloc memory for buf", __func__);
return -EINVAL;
}
vos_mem_zero(buf, sizeof(wmi_vdev_stopped_event_fixed_param));
vos_mem_copy(buf, (u_int8_t *)event,
sizeof(wmi_vdev_stopped_event_fixed_param));
vos_msg.type = WDA_VDEV_STOP_IND;
vos_msg.bodyptr = buf;
vos_msg.bodyval = 0;
if (VOS_STATUS_SUCCESS !=
vos_mq_post_message(VOS_MQ_ID_WDA, &vos_msg)) {
WMA_LOGP("%s: Failed to post WDA_VDEV_STOP_IND msg", __func__);
vos_mem_free(buf);
return -1;
}
WMA_LOGD("WDA_VDEV_STOP_IND posted");
return 0;
}
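/**
* wma_hidden_ssid_vdev_restart_on_vdev_stop() - restart vdev for hidden SSID
* @wma_handle: wma handle
* @sessionId: vdev id of the AP interface
*
* Builds a WMI_VDEV_RESTART_REQUEST_CMDID from the cached vdev restart
* parameters so that the SAP vdev comes back up with the updated hidden
* SSID setting after the preceding vdev stop completes.
*
* Return: none
*/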
void wma_hidden_ssid_vdev_restart_on_vdev_stop(tp_wma_handle wma_handle, u_int8_t sessionId)
{
wmi_vdev_start_request_cmd_fixed_param *cmd;
wmi_buf_t buf;
wmi_channel *chan;
int32_t len;
u_int8_t *buf_ptr;
struct wma_txrx_node *intr = wma_handle->interfaces;
int32_t ret=0;
WLAN_PHY_MODE chanmode;
tpAniSirGlobal mac_ctx = (tpAniSirGlobal)vos_get_context(
VOS_MODULE_ID_PE, wma_handle->vos_context);
if (!mac_ctx) {
WMA_LOGE("%s: Failed to get mac_ctx", __func__);
return;
}
len = sizeof(*cmd) + sizeof(wmi_channel) +
WMI_TLV_HDR_SIZE;
buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
if (!buf) {
WMA_LOGE("%s : wmi_buf_alloc failed", __func__);
adf_os_atomic_set(&intr[sessionId].vdev_restart_params.hidden_ssid_restart_in_progress,0);
return;
}
buf_ptr = (u_int8_t *) wmi_buf_data(buf);
cmd = (wmi_vdev_start_request_cmd_fixed_param *) buf_ptr;
chan = (wmi_channel *) (buf_ptr + sizeof(*cmd));
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_vdev_start_request_cmd_fixed_param));
WMITLV_SET_HDR(&chan->tlv_header,
WMITLV_TAG_STRUC_wmi_channel,
WMITLV_GET_STRUCT_TLVLEN(wmi_channel));
cmd->vdev_id = sessionId;
cmd->ssid.ssid_len = intr[sessionId].vdev_restart_params.ssid.ssid_len;
vos_mem_copy(cmd->ssid.ssid,
intr[sessionId].vdev_restart_params.ssid.ssid,
cmd->ssid.ssid_len);
cmd->flags = intr[sessionId].vdev_restart_params.flags;
if (intr[sessionId].vdev_restart_params.ssidHidden)
cmd->flags |= WMI_UNIFIED_VDEV_START_HIDDEN_SSID;
else
cmd->flags &= (0xFFFFFFFE);
cmd->requestor_id = intr[sessionId].vdev_restart_params.requestor_id;
cmd->disable_hw_ack = intr[sessionId].vdev_restart_params.disable_hw_ack;
chan->mhz = intr[sessionId].vdev_restart_params.chan.mhz;
chan->band_center_freq1 = intr[sessionId].vdev_restart_params.chan.band_center_freq1;
chan->band_center_freq2 = intr[sessionId].vdev_restart_params.chan.band_center_freq2;
chan->info = intr[sessionId].vdev_restart_params.chan.info;
chan->reg_info_1 = intr[sessionId].vdev_restart_params.chan.reg_info_1;
chan->reg_info_2 = intr[sessionId].vdev_restart_params.chan.reg_info_2;
if (chan->band_center_freq1 == 0) {
chan->band_center_freq1 = chan->mhz;
chanmode = intr[sessionId].chanmode;
if (chanmode == MODE_11AC_VHT80)
chan->band_center_freq1 = vos_chan_to_freq(
wma_getCenterChannel(
chan->mhz,
mac_ctx->roam.configParam.channelBondingMode5GHz));
if ((chanmode == MODE_11NA_HT40) ||
(chanmode == MODE_11AC_VHT40)) {
if (mac_ctx->roam.configParam.channelBondingMode5GHz ==
PHY_DOUBLE_CHANNEL_LOW_PRIMARY)
chan->band_center_freq1 += 10;
else
chan->band_center_freq1 -= 10;
}
if ((chanmode == MODE_11NG_HT40) ||
(chanmode == MODE_11AC_VHT40_2G)) {
if (mac_ctx->roam.configParam.channelBondingMode24GHz ==
PHY_DOUBLE_CHANNEL_LOW_PRIMARY)
chan->band_center_freq1 += 10;
else
chan->band_center_freq1 -= 10;
}
}
cmd->num_noa_descriptors = 0;
buf_ptr = (u_int8_t *)(((u_int8_t *) cmd) + sizeof(*cmd) +
sizeof(wmi_channel));
WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
cmd->num_noa_descriptors *
sizeof(wmi_p2p_noa_descriptor));
ret = wmi_unified_cmd_send(wma_handle->wmi_handle,buf,len,
WMI_VDEV_RESTART_REQUEST_CMDID);
if (ret < 0) {
WMA_LOGE("%s: Failed to send vdev restart command", __func__);
adf_os_atomic_set(&intr[sessionId].vdev_restart_params.hidden_ssid_restart_in_progress,0);
wmi_buf_free(buf);
}
}
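/**
* wma_vdev_stop_ind() - process WDA_VDEV_STOP_IND in MC thread context
* @wma: wma handle
* @buf: vdev stopped event fixed param
*
* If a hidden SSID restart is pending on an AP vdev, issues the vdev
* restart first. Then looks up the pending vdev stop request and, for a
* DELETE_BSS request, removes the remaining peers, sends vdev down,
* releases the beacon buffer and responds to UMAC with
* WDA_DELETE_BSS_RSP.
*
* Return: 0 on success, negative value on failure
*/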
static int wma_vdev_stop_ind(tp_wma_handle wma, u_int8_t *buf)
{
wmi_vdev_stopped_event_fixed_param *resp_event;
struct wma_target_req *req_msg;
ol_txrx_peer_handle peer;
ol_txrx_pdev_handle pdev;
u_int8_t peer_id;
struct wma_txrx_node *iface;
int32_t status = 0;
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
tpAniSirGlobal mac_ctx = (tpAniSirGlobal)vos_get_context(
VOS_MODULE_ID_PE,
wma->vos_context);
if (NULL == mac_ctx) {
WMA_LOGE("%s: Failed to get mac_ctx", __func__);
return -EINVAL;
}
#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
WMA_LOGI("%s: Enter", __func__);
if (!buf) {
WMA_LOGE("Invalid event buffer");
return -EINVAL;
}
resp_event = (wmi_vdev_stopped_event_fixed_param *)buf;
if ((resp_event->vdev_id < wma->max_bssid) &&
(adf_os_atomic_read(&wma->interfaces[resp_event->vdev_id].vdev_restart_params.hidden_ssid_restart_in_progress)) &&
((wma->interfaces[resp_event->vdev_id].type == WMI_VDEV_TYPE_AP) &&
(wma->interfaces[resp_event->vdev_id].sub_type == 0))) {
WMA_LOGE("%s: vdev stop event recevied for hidden ssid set using IOCTL ", __func__);
req_msg = wma_fill_vdev_req(wma, resp_event->vdev_id,
WDA_HIDDEN_SSID_VDEV_RESTART,
WMA_TARGET_REQ_TYPE_VDEV_START, resp_event,
WMA_VDEV_START_REQUEST_TIMEOUT);
if (!req_msg) {
WMA_LOGE("%s: Failed to fill vdev request, vdev_id %d",
__func__, resp_event->vdev_id);
return -EINVAL;
}
wma_hidden_ssid_vdev_restart_on_vdev_stop(wma, resp_event->vdev_id);
}
req_msg = wma_find_vdev_req(wma, resp_event->vdev_id,
WMA_TARGET_REQ_TYPE_VDEV_STOP);
if (!req_msg) {
WMA_LOGP("%s: Failed to lookup vdev request for vdev id %d",
__func__, resp_event->vdev_id);
return -EINVAL;
}
pdev = vos_get_context(VOS_MODULE_ID_TXRX, wma->vos_context);
if (!pdev) {
WMA_LOGE("%s: pdev is NULL", __func__);
status = -EINVAL;
vos_timer_stop(&req_msg->event_timeout);
goto free_req_msg;
}
vos_timer_stop(&req_msg->event_timeout);
if (req_msg->msg_type == WDA_DELETE_BSS_REQ) {
tpDeleteBssParams params =
(tpDeleteBssParams)req_msg->user_data;
struct beacon_info *bcn;
if (resp_event->vdev_id >= wma->max_bssid) {
WMA_LOGE("%s: Invalid vdev_id %d", __func__,
resp_event->vdev_id);
vos_mem_free(params);
status = -EINVAL;
goto free_req_msg;
}
iface = &wma->interfaces[resp_event->vdev_id];
if (iface->handle == NULL) {
WMA_LOGE("%s vdev id %d is already deleted",
__func__, resp_event->vdev_id);
vos_mem_free(params);
status = -EINVAL;
goto free_req_msg;
}
/* Clear arp and ns offload cache */
vos_mem_zero(&iface->ns_offload_req,
sizeof(iface->ns_offload_req));
vos_mem_zero(&iface->arp_offload_req,
sizeof(iface->arp_offload_req));
#ifdef QCA_IBSS_SUPPORT
if ( wma_is_vdev_in_ibss_mode(wma, resp_event->vdev_id))
wma_delete_all_ibss_peers(wma, resp_event->vdev_id);
else
#endif
if (WMA_IS_VDEV_IN_NDI_MODE(wma->interfaces,
resp_event->vdev_id)) {
wma_delete_all_nan_remote_peers(wma,
resp_event->vdev_id);
} else {
if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id))
{
wma_delete_all_ap_remote_peers(wma, resp_event->vdev_id);
}
peer = ol_txrx_find_peer_by_addr(pdev, params->bssid,
&peer_id);
if (!peer)
WMA_LOGD("%s Failed to find peer %pM",
__func__, params->bssid);
wma_remove_peer(wma, params->bssid, resp_event->vdev_id,
peer, VOS_FALSE);
}
if (wmi_unified_vdev_down_send(wma->wmi_handle, resp_event->vdev_id) < 0) {
WMA_LOGE("Failed to send vdev down cmd: vdev %d",
resp_event->vdev_id);
} else {
wma->interfaces[resp_event->vdev_id].vdev_up = FALSE;
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
if (mac_ctx->sap.sap_channel_avoidance)
wma_find_mcc_ap(wma,
resp_event->vdev_id,
false);
#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
}
ol_txrx_vdev_flush(iface->handle);
WMA_LOGD("%s, vdev_id: %d, un-pausing tx_ll_queue for VDEV_STOP rsp",
__func__, resp_event->vdev_id);
wdi_in_vdev_unpause(iface->handle,
OL_TXQ_PAUSE_REASON_VDEV_STOP);
iface->pause_bitmap &= ~(1 << PAUSE_TYPE_HOST);
adf_os_atomic_set(&iface->bss_status, WMA_BSS_STATUS_STOPPED);
WMA_LOGD("%s: (type %d subtype %d) BSS is stopped",
__func__, iface->type, iface->sub_type);
bcn = wma->interfaces[resp_event->vdev_id].beacon;
if (bcn) {
WMA_LOGD("%s: Freeing beacon struct %pK, "
"template memory %pK", __func__,
bcn, bcn->buf);
if (bcn->dma_mapped)
adf_nbuf_unmap_single(pdev->osdev, bcn->buf,
ADF_OS_DMA_TO_DEVICE);
adf_nbuf_free(bcn->buf);
vos_mem_free(bcn);
wma->interfaces[resp_event->vdev_id].beacon = NULL;
}
/* A timeout status means this is a WMA-generated DEL BSS REQ,
issued to stop the VDEV after an ADD BSS REQ timed out; in that
case no response needs to be sent to UMAC */
if (params->status == eHAL_STATUS_FW_MSG_TIMEDOUT){
vos_mem_free(params);
WMA_LOGE("%s: DEL BSS from ADD BSS timeout do not send "
"resp to UMAC (vdev id %x)",
__func__, resp_event->vdev_id);
} else {
params->status = VOS_STATUS_SUCCESS;
wma_send_msg(wma, WDA_DELETE_BSS_RSP, (void *)params, 0);
}
if (iface->del_staself_req) {
WMA_LOGA("scheduling defered deletion (vdev id %x)",
resp_event->vdev_id);
wma_vdev_detach(wma, iface->del_staself_req, 1);
}
}
free_req_msg:
vos_timer_destroy(&req_msg->event_timeout);
adf_os_mem_free(req_msg);
return status;
}
#ifdef WLAN_FEATURE_EXTWOW_SUPPORT
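/**
* wma_send_status_of_ext_wow() - post ExtWoW readiness status to SME
* @wma: wma handle
* @status: true if ExtWoW was enabled in firmware, false otherwise
*
* Return: none
*/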
static void wma_send_status_of_ext_wow(tp_wma_handle wma, boolean status)
{
tSirReadyToExtWoWInd *ready_to_extwow;
VOS_STATUS vstatus;
vos_msg_t vos_msg;
u_int8_t len;
WMA_LOGD("Posting ready to suspend indication to umac");
len = sizeof(tSirReadyToExtWoWInd);
ready_to_extwow = (tSirReadyToExtWoWInd *) vos_mem_malloc(len);
if (NULL == ready_to_extwow) {
WMA_LOGE("%s: Memory allocation failure", __func__);
return;
}
ready_to_extwow->mesgType = eWNI_SME_READY_TO_EXTWOW_IND;
ready_to_extwow->mesgLen = len;
ready_to_extwow->status= status;
vos_msg.type = eWNI_SME_READY_TO_EXTWOW_IND;
vos_msg.bodyptr = (void *) ready_to_extwow;
vos_msg.bodyval = 0;
vstatus = vos_mq_post_message(VOS_MQ_ID_SME, &vos_msg);
if (vstatus != VOS_STATUS_SUCCESS) {
WMA_LOGE("Failed to post ready to suspend");
vos_mem_free(ready_to_extwow);
}
}
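/**
* wma_enable_ext_wow() - send WMI_EXTWOW_ENABLE_CMDID to firmware
* @wma: wma handle
* @params: ExtWoW parameters (vdev id, type, wakeup pin)
*
* Sends the enable command and posts the resulting ExtWoW readiness
* status to umac.
*
* Return: VOS_STATUS_SUCCESS on success, error code on failure
*/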
static int wma_enable_ext_wow(tp_wma_handle wma,
tpSirExtWoWParams params)
{
wmi_extwow_enable_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len;
int ret;
len = sizeof(wmi_extwow_enable_cmd_fixed_param);
buf = wmi_buf_alloc(wma->wmi_handle, len);
if (!buf) {
WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
return VOS_STATUS_E_NOMEM;
}
cmd = (wmi_extwow_enable_cmd_fixed_param *) wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_extwow_enable_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_extwow_enable_cmd_fixed_param));
cmd->vdev_id = params->vdev_id;
cmd->type = params->type;
cmd->wakeup_pin_num = params->wakeup_pin_num;
WMA_LOGD("%s: vdev_id %d type %d Wakeup_pin_num %x",
__func__, cmd->vdev_id,
cmd->type, cmd->wakeup_pin_num);
ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_EXTWOW_ENABLE_CMDID);
if (ret) {
WMA_LOGE("%s: Failed to set EXTWOW Enable", __func__);
wmi_buf_free(buf);
wma_send_status_of_ext_wow(wma, FALSE);
return VOS_STATUS_E_FAILURE;
}
wma_send_status_of_ext_wow(wma, TRUE);
return VOS_STATUS_SUCCESS;
}
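/**
* wma_set_app_type1_params_in_fw() - set ExtWoW app type1 params in firmware
* @wma: wma handle
* @appType1Params: wakee MAC address, identification id and password
*
* Return: VOS_STATUS_SUCCESS on success, error code on failure
*/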
static int wma_set_app_type1_params_in_fw(tp_wma_handle wma,
tpSirAppType1Params appType1Params)
{
wmi_extwow_set_app_type1_params_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len;
int ret;
len = sizeof(wmi_extwow_set_app_type1_params_cmd_fixed_param);
buf = wmi_buf_alloc(wma->wmi_handle, len);
if (!buf) {
WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
return VOS_STATUS_E_NOMEM;
}
cmd = (wmi_extwow_set_app_type1_params_cmd_fixed_param *)
wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_extwow_set_app_type1_params_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_extwow_set_app_type1_params_cmd_fixed_param));
cmd->vdev_id = appType1Params->vdev_id;
WMI_CHAR_ARRAY_TO_MAC_ADDR(appType1Params->wakee_mac_addr,
&cmd->wakee_mac);
vos_mem_copy(cmd->ident, appType1Params->identification_id, 8);
cmd->ident_len = appType1Params->id_length;
vos_mem_copy(cmd->passwd, appType1Params->password, 16);
cmd->passwd_len = appType1Params->pass_length;
WMA_LOGD("%s: vdev_id %d wakee_mac_addr %pM "
"identification_id %.8s id_length %u "
"password %.16s pass_length %u",
__func__, cmd->vdev_id, appType1Params->wakee_mac_addr,
cmd->ident, cmd->ident_len,
cmd->passwd, cmd->passwd_len);
ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID);
if (ret) {
WMA_LOGE("%s: Failed to set APP TYPE1 PARAMS", __func__);
wmi_buf_free(buf);
return VOS_STATUS_E_FAILURE;
}
return VOS_STATUS_SUCCESS;
}
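/**
* wma_set_app_type2_params_in_fw() - set ExtWoW app type2 params in firmware
* @wma: wma handle
* @appType2Params: rc4 key, IP/TCP and keepalive parameters for the wakee
*
* Return: VOS_STATUS_SUCCESS on success, error code on failure
*/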
static int wma_set_app_type2_params_in_fw(tp_wma_handle wma,
tpSirAppType2Params appType2Params)
{
wmi_extwow_set_app_type2_params_cmd_fixed_param *cmd;
wmi_buf_t buf;
int32_t len;
int ret;
len = sizeof(wmi_extwow_set_app_type2_params_cmd_fixed_param);
buf = wmi_buf_alloc(wma->wmi_handle, len);
if (!buf) {
WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
return VOS_STATUS_E_NOMEM;
}
cmd = (wmi_extwow_set_app_type2_params_cmd_fixed_param *)
wmi_buf_data(buf);
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_extwow_set_app_type2_params_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_extwow_set_app_type2_params_cmd_fixed_param));
cmd->vdev_id = appType2Params->vdev_id;
vos_mem_copy(cmd->rc4_key, appType2Params->rc4_key, 16);
cmd->rc4_key_len = appType2Params->rc4_key_len;
cmd->ip_id = appType2Params->ip_id;
cmd->ip_device_ip = appType2Params->ip_device_ip;
cmd->ip_server_ip = appType2Params->ip_server_ip;
cmd->tcp_src_port = appType2Params->tcp_src_port;
cmd->tcp_dst_port = appType2Params->tcp_dst_port;
cmd->tcp_seq = appType2Params->tcp_seq;
cmd->tcp_ack_seq = appType2Params->tcp_ack_seq;
cmd->keepalive_init = appType2Params->keepalive_init;
cmd->keepalive_min = appType2Params->keepalive_min;
cmd->keepalive_max = appType2Params->keepalive_max;
cmd->keepalive_inc = appType2Params->keepalive_inc;
WMI_CHAR_ARRAY_TO_MAC_ADDR(appType2Params->gateway_mac,
&cmd->gateway_mac);
cmd->tcp_tx_timeout_val = appType2Params->tcp_tx_timeout_val;
cmd->tcp_rx_timeout_val = appType2Params->tcp_rx_timeout_val;
WMA_LOGD("%s: vdev_id %d gateway_mac %pM "
"rc4_key %.16s rc4_key_len %u "
"ip_id %x ip_device_ip %x ip_server_ip %x "
"tcp_src_port %u tcp_dst_port %u tcp_seq %u "
"tcp_ack_seq %u keepalive_init %u keepalive_min %u "
"keepalive_max %u keepalive_inc %u "
"tcp_tx_timeout_val %u tcp_rx_timeout_val %u",
__func__, cmd->vdev_id, appType2Params->gateway_mac,
cmd->rc4_key, cmd->rc4_key_len,
cmd->ip_id, cmd->ip_device_ip, cmd->ip_server_ip,
cmd->tcp_src_port, cmd->tcp_dst_port, cmd->tcp_seq,
cmd->tcp_ack_seq, cmd->keepalive_init, cmd->keepalive_min,
cmd->keepalive_max, cmd->keepalive_inc,
cmd->tcp_tx_timeout_val, cmd->tcp_rx_timeout_val);
ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID);
if (ret) {
WMA_LOGE("%s: Failed to set APP TYPE2 PARAMS", __func__);
wmi_buf_free(buf);
return VOS_STATUS_E_FAILURE;
}
return VOS_STATUS_SUCCESS;
}
#endif
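/**
* wma_update_pdev_stats() - copy pdev stats into pending stats responses
* @wma: wma handle
* @pdev_stats: pdev stats from the firmware stats event
*
* For every interface with an outstanding stats request, marks the pdev
* stats as received and updates the class A max power from the channel
* tx power reported by firmware.
*
* Return: none
*/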
static void wma_update_pdev_stats(tp_wma_handle wma,
wmi_pdev_stats *pdev_stats)
{
tAniGetPEStatsRsp *stats_rsp_params;
tANI_U32 temp_mask;
tANI_U8 *stats_buf;
tCsrGlobalClassAStatsInfo *classa_stats = NULL;
struct wma_txrx_node *node;
u_int8_t i;
for (i = 0; i < wma->max_bssid; i++) {
node = &wma->interfaces[i];
stats_rsp_params = node->stats_rsp;
if (stats_rsp_params) {
node->fw_stats_set |= FW_PDEV_STATS_SET;
WMA_LOGD("<---FW PDEV STATS received for vdevId:%d",
i);
stats_buf = (tANI_U8 *) (stats_rsp_params + 1);
temp_mask = stats_rsp_params->statsMask;
if (temp_mask & (1 << eCsrSummaryStats))
stats_buf += sizeof(tCsrSummaryStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassAStats)) {
classa_stats =
(tCsrGlobalClassAStatsInfo *) stats_buf;
classa_stats->max_pwr = pdev_stats->chan_tx_pwr;
}
}
}
}
/**
* wma_vdev_stats_lost_link_helper() - helper function to extract
* lost link information from vdev statistics event while deleting BSS.
* @wma: WMA handle
* @vdev_stats: statistics information from firmware
*
* This is for informing HDD to collect lost link information during
* disconnection. The following conditions are checked:
* 1. vdev is up
* 2. bssid is zero. While handling the DELETE_BSS request the bssid is set
* to zero, so this check indicates the event arrived while the BSS is
* being deleted
* 3. DELETE_BSS is the queued request message. This check is done last
* since it is the most expensive, searching entries in the request list
*
* Return: none
*/
static void wma_vdev_stats_lost_link_helper(tp_wma_handle wma,
wmi_vdev_stats *vdev_stats)
{
struct wma_txrx_node *node;
int8_t rssi;
struct wma_target_req *req_msg;
uint8_t zero_mac[ETH_ALEN] = {0};
int8_t bcn_snr, dat_snr;
node = &wma->interfaces[vdev_stats->vdev_id];
if (node->vdev_up &&
vos_mem_compare(node->bssid, zero_mac, ETH_ALEN)) {
req_msg = wma_peek_vdev_req(wma, vdev_stats->vdev_id,
WMA_TARGET_REQ_TYPE_VDEV_STOP);
if ((NULL == req_msg) ||
(WDA_DELETE_BSS_REQ != req_msg->msg_type)) {
WMA_LOGD("%s: cannot find DELETE_BSS request message",
__func__);
return;
}
bcn_snr = vdev_stats->vdev_snr.bcn_snr;
dat_snr = vdev_stats->vdev_snr.dat_snr;
WMA_LOGD("%s: get vdev id %d, beancon snr %d, data snr %d",
__func__, vdev_stats->vdev_id, bcn_snr, dat_snr);
if ((bcn_snr != WMA_TGT_INVALID_SNR_OLD) &&
(bcn_snr != WMA_TGT_INVALID_SNR_NEW))
rssi = bcn_snr;
else if ((dat_snr != WMA_TGT_INVALID_SNR_OLD) &&
(dat_snr != WMA_TGT_INVALID_SNR_NEW))
rssi = dat_snr;
else
rssi = WMA_TGT_INVALID_SNR_OLD;
/* Get the absolute rssi value from the current rssi value */
rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
wma_lost_link_info_handler(wma, vdev_stats->vdev_id, rssi);
}
}
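/**
* wma_update_vdev_stats() - process vdev stats from firmware
* @wma: wma handle
* @vdev_stats: vdev stats from the firmware stats event
*
* Updates any pending summary stats response, completes an outstanding
* RSSI or SNR request using the beacon/data SNR and finally checks for
* lost link information while a DELETE_BSS is in progress.
*
* Return: none
*/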
static void wma_update_vdev_stats(tp_wma_handle wma,
wmi_vdev_stats *vdev_stats)
{
tAniGetPEStatsRsp *stats_rsp_params;
tCsrSummaryStatsInfo *summary_stats = NULL;
tANI_U8 *stats_buf;
struct wma_txrx_node *node;
tANI_U8 i;
v_S7_t rssi = 0;
VOS_STATUS vos_status;
tAniGetRssiReq *pGetRssiReq = (tAniGetRssiReq*)wma->pGetRssiReq;
vos_msg_t sme_msg = {0};
int8_t bcn_snr, dat_snr;
node = &wma->interfaces[vdev_stats->vdev_id];
stats_rsp_params = node->stats_rsp;
if (stats_rsp_params) {
stats_buf = (tANI_U8 *) (stats_rsp_params + 1);
node->fw_stats_set |= FW_VDEV_STATS_SET;
WMA_LOGD("<---FW VDEV STATS received for vdevId:%d",
vdev_stats->vdev_id);
if (stats_rsp_params->statsMask &
(1 << eCsrSummaryStats)) {
summary_stats = (tCsrSummaryStatsInfo *) stats_buf;
for (i=0 ; i < 4 ; i++) {
summary_stats->tx_frm_cnt[i] =
vdev_stats->tx_frm_cnt[i];
summary_stats->fail_cnt[i] =
vdev_stats->fail_cnt[i];
summary_stats->multiple_retry_cnt[i] =
vdev_stats->multiple_retry_cnt[i];
}
summary_stats->rx_frm_cnt = vdev_stats->rx_frm_cnt;
summary_stats->rx_error_cnt = vdev_stats->rx_err_cnt;
summary_stats->rx_discard_cnt =
vdev_stats->rx_discard_cnt;
summary_stats->ack_fail_cnt = vdev_stats->ack_fail_cnt;
summary_stats->rts_succ_cnt = vdev_stats->rts_succ_cnt;
summary_stats->rts_fail_cnt = vdev_stats->rts_fail_cnt;
}
}
bcn_snr = vdev_stats->vdev_snr.bcn_snr;
dat_snr = vdev_stats->vdev_snr.dat_snr;
WMA_LOGD("vdev id %d beancon snr %d data snr %d",
vdev_stats->vdev_id, bcn_snr, dat_snr);
if (pGetRssiReq &&
pGetRssiReq->sessionId == vdev_stats->vdev_id) {
if ((bcn_snr == WMA_TGT_INVALID_SNR_OLD ||
bcn_snr == WMA_TGT_INVALID_SNR_NEW) &&
(dat_snr == WMA_TGT_INVALID_SNR_OLD ||
dat_snr == WMA_TGT_INVALID_SNR_NEW)) {
/*
* Firmware sends an invalid snr till it sees a
* Beacon/Data frame after connection, since after
* vdev up the fw resets the snr to invalid.
* During this period the host returns the last known
* rssi from the connection.
*/
rssi = wma->first_rssi;
} else {
if (bcn_snr != WMA_TGT_INVALID_SNR_OLD &&
bcn_snr != WMA_TGT_INVALID_SNR_NEW) {
rssi = bcn_snr;
} else if (dat_snr != WMA_TGT_INVALID_SNR_OLD &&
dat_snr != WMA_TGT_INVALID_SNR_NEW) {
rssi = dat_snr;
}
/*
* Get the absolute rssi value by adding the noise floor to
* the reported snr; the sinr value is hardcoded to 0 in the
* core stack
*/
rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
}
WMA_LOGD("Average Rssi = %d, vdev id= %d", rssi,
pGetRssiReq->sessionId);
/* update the average rssi value to UMAC layer */
if (NULL != pGetRssiReq->rssiCallback) {
((tCsrRssiCallback)(pGetRssiReq->rssiCallback))(rssi,pGetRssiReq->staId,
pGetRssiReq->pDevContext);
}
adf_os_mem_free(pGetRssiReq);
wma->pGetRssiReq = NULL;
}
if (node->psnr_req) {
tAniGetSnrReq *p_snr_req = node->psnr_req;
if ((bcn_snr != WMA_TGT_INVALID_SNR_OLD) &&
(bcn_snr != WMA_TGT_INVALID_SNR_NEW))
p_snr_req->snr = bcn_snr;
else if ((dat_snr != WMA_TGT_INVALID_SNR_OLD) &&
(dat_snr != WMA_TGT_INVALID_SNR_NEW))
p_snr_req->snr = dat_snr;
else
p_snr_req->snr = WMA_TGT_INVALID_SNR_OLD;
sme_msg.type = eWNI_SME_SNR_IND;
sme_msg.bodyptr = p_snr_req;
sme_msg.bodyval = 0;
vos_status = vos_mq_post_message(VOS_MODULE_ID_SME, &sme_msg);
if (!VOS_IS_STATUS_SUCCESS(vos_status)) {
WMA_LOGE("%s: Fail to post snr ind msg", __func__);
vos_mem_free(p_snr_req);
}
node->psnr_req = NULL;
}
wma_vdev_stats_lost_link_helper(wma, vdev_stats);
}
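/**
* wma_post_stats() - send the accumulated stats response to UMAC
* @wma: wma handle
* @node: interface node holding the pending stats response
*
* Return: none
*/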
static void wma_post_stats(tp_wma_handle wma, struct wma_txrx_node *node)
{
tAniGetPEStatsRsp *stats_rsp_params;
stats_rsp_params = node->stats_rsp;
/* send response to UMAC*/
wma_send_msg(wma, WDA_GET_STATISTICS_RSP, (void *)stats_rsp_params, 0) ;
node->stats_rsp = NULL;
node->fw_stats_set = 0;
}
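/**
* wma_update_peer_stats() - process peer stats from firmware
* @wma: wma handle
* @peer_stats: peer stats from the firmware stats event
*
* Updates the class A stats (tx rate, rate flags, mcs index and tx power)
* of the interface whose bssid matches the peer MAC address.
*
* Return: none
*/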
static void wma_update_peer_stats(tp_wma_handle wma, wmi_peer_stats *peer_stats)
{
tAniGetPEStatsRsp *stats_rsp_params;
tCsrGlobalClassAStatsInfo *classa_stats = NULL;
struct wma_txrx_node *node;
tANI_U8 *stats_buf, vdev_id, macaddr[IEEE80211_ADDR_LEN], mcsRateFlags;
tANI_U32 temp_mask;
WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, &macaddr[0]);
if (!wma_find_vdev_by_bssid(wma, macaddr, &vdev_id))
return;
node = &wma->interfaces[vdev_id];
if (node->stats_rsp) {
node->fw_stats_set |= FW_PEER_STATS_SET;
WMA_LOGD("<-- FW PEER STATS received for vdevId:%d", vdev_id);
stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
stats_buf = (tANI_U8 *) (stats_rsp_params + 1);
temp_mask = stats_rsp_params->statsMask;
if (temp_mask & (1 << eCsrSummaryStats))
stats_buf += sizeof(tCsrSummaryStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassAStats)) {
classa_stats = (tCsrGlobalClassAStatsInfo *) stats_buf;
WMA_LOGD("peer tx rate:%d", peer_stats->peer_tx_rate);
/* The linkspeed returned by fw is in kbps, so convert
* it into units of 500 kbps, which is what UMAC expects */
if (peer_stats->peer_tx_rate) {
classa_stats->tx_rate =
peer_stats->peer_tx_rate/500;
}
classa_stats->tx_rate_flags = node->rate_flags;
if (!(node->rate_flags & eHAL_TX_RATE_LEGACY)) {
classa_stats->mcs_index =
wma_get_mcs_idx((peer_stats->peer_tx_rate/100),
node->rate_flags,
node->nss,
&mcsRateFlags);
/* The rx_frag_cnt and promiscuous_rx_frag_cnt
* parameters are currently unused, so reuse them
* to hold the nss value and the mcs rate flags */
classa_stats->rx_frag_cnt = node->nss;
classa_stats->promiscuous_rx_frag_cnt = mcsRateFlags;
}
/* FW returns tx power in units of 0.5 dBm;
convert it back to units of 1 dBm */
classa_stats->max_pwr =
roundup(classa_stats->max_pwr, 2) >> 1;
}
}
}
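/**
* wma_post_link_status() - post link status indication to SME
* @pGetLinkStatus: link status request being completed
* @link_status: link status derived from the vdev rate stats
*
* Return: none
*/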
static void wma_post_link_status(tAniGetLinkStatus *pGetLinkStatus,
u_int8_t link_status)
{
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
vos_msg_t sme_msg = {0} ;
pGetLinkStatus->linkStatus = link_status;
sme_msg.type = eWNI_SME_LINK_STATUS_IND;
sme_msg.bodyptr = pGetLinkStatus;
sme_msg.bodyval = 0;
vos_status = vos_mq_post_message(VOS_MODULE_ID_SME, &sme_msg);
if (!VOS_IS_STATUS_SUCCESS(vos_status)) {
WMA_LOGE("%s: Fail to post link status ind msg", __func__);
vos_mem_free(pGetLinkStatus);
}
}
/**
* wma_update_per_chain_rssi_stats() - to store per chain rssi stats for
* all vdevs for which the stats were requested into csr stats structure.
* @wma: wma handle
* @rssi_stats: rssi stats
* @rssi_per_chain_stats: buffer where rssi stats to be stored
*
* This function stores per chain rssi stats received from fw for all vdevs for
* which the stats were requested into a csr stats structure.
*
* Return: void
*/
static void wma_update_per_chain_rssi_stats(tp_wma_handle wma,
wmi_rssi_stats *rssi_stats,
struct csr_per_chain_rssi_stats_info *rssi_per_chain_stats)
{
int i;
int8_t bcn_snr, dat_snr;
for (i = 0; i < NUM_CHAINS_MAX; i++) {
bcn_snr = rssi_stats->rssi_avg_beacon[i];
dat_snr = rssi_stats->rssi_avg_data[i];
WMA_LOGD("chain %d beacon snr %d data snr %d",
i, bcn_snr, dat_snr);
if ((dat_snr != WMA_TGT_INVALID_SNR_OLD &&
dat_snr != WMA_TGT_INVALID_SNR_NEW))
rssi_per_chain_stats->rssi[i] = dat_snr;
else if ((bcn_snr != WMA_TGT_INVALID_SNR_OLD &&
bcn_snr != WMA_TGT_INVALID_SNR_NEW))
rssi_per_chain_stats->rssi[i] = bcn_snr;
else
/*
* Firmware sends an invalid snr till it sees a
* Beacon/Data frame after connection, since after
* vdev up the fw resets the snr to invalid.
* During this period the host returns an invalid rssi
* value.
*/
rssi_per_chain_stats->rssi[i] = WMA_TGT_RSSI_INVALID;
/*
* Get the absolute rssi value by adding the noise floor to the
* reported snr; the sinr value is hardcoded to 0 in the CORE stack
*/
rssi_per_chain_stats->rssi[i] += WMA_TGT_NOISE_FLOOR_DBM;
WMI_MAC_ADDR_TO_CHAR_ARRAY(&(rssi_stats->peer_macaddr),
rssi_per_chain_stats->peer_mac_addr);
}
}
/**
* wma_update_rssi_stats() - to update rssi stats for all vdevs
* for which the stats were requested.
* @wma: wma handle
* @rssi_stats: rssi stats
*
* This function updates the rssi stats for all vdevs for which
* the stats were requested.
*
* Return: void
*/
static void wma_update_rssi_stats(tp_wma_handle wma,
wmi_rssi_stats *rssi_stats)
{
tAniGetPEStatsRsp *stats_rsp_params;
struct csr_per_chain_rssi_stats_info *rssi_per_chain_stats = NULL;
struct wma_txrx_node *node;
uint8_t *stats_buf;
uint32_t temp_mask;
uint8_t vdev_id;
vdev_id = rssi_stats->vdev_id;
node = &wma->interfaces[vdev_id];
if (node->stats_rsp) {
node->fw_stats_set |= FW_RSSI_PER_CHAIN_STATS_SET;
WMA_LOGD("<-- FW RSSI PER CHAIN STATS received for vdevId:%d",
vdev_id);
stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
stats_buf = (tANI_U8 *) (stats_rsp_params + 1);
temp_mask = stats_rsp_params->statsMask;
if (temp_mask & (1 << eCsrSummaryStats))
stats_buf += sizeof(tCsrSummaryStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassAStats))
stats_buf += sizeof(tCsrGlobalClassAStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassBStats))
stats_buf += sizeof(tCsrGlobalClassBStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassCStats))
stats_buf += sizeof(tCsrGlobalClassCStatsInfo);
if (temp_mask & (1 << eCsrGlobalClassDStats))
stats_buf += sizeof(tCsrGlobalClassDStatsInfo);
if (temp_mask & (1 << eCsrPerStaStats))
stats_buf += sizeof(tCsrPerStaStatsInfo);
if (temp_mask & (1 << csr_per_chain_rssi_stats)) {
rssi_per_chain_stats =
(struct csr_per_chain_rssi_stats_info *)stats_buf;
wma_update_per_chain_rssi_stats(wma, rssi_stats,
rssi_per_chain_stats);
}
}
}
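/**
* wma_link_status_rsp() - process WDA_GET_LINK_STATUS_RSP_IND
* @wma: wma handle
* @buf: vdev rate stats event copied by the event handler
*
* Derives the MIMO/VHT link status from the per-vdev rate info and posts
* it to the pending link status request of each interface.
*
* Return: 0
*/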
static int wma_link_status_rsp(tp_wma_handle wma, u_int8_t *buf)
{
wmi_vdev_rate_stats_event_fixed_param *event;
wmi_vdev_rate_ht_info *ht_info;
struct wma_txrx_node *intr = wma->interfaces;
u_int8_t link_status = LINK_STATUS_LEGACY;
int i;
event = (wmi_vdev_rate_stats_event_fixed_param *)buf;
ht_info = (wmi_vdev_rate_ht_info *)(buf + sizeof(*event));
WMA_LOGD("num_vdev_stats: %d", event->num_vdev_stats);
for (i = 0; (i < event->num_vdev_stats) && ht_info; i++) {
WMA_LOGD(
"%s vdevId:%d tx_nss:%d rx_nss:%d tx_preamble:%d rx_preamble:%d",
__func__,
ht_info->vdevid,
ht_info->tx_nss,
ht_info->rx_nss,
ht_info->tx_preamble,
ht_info->rx_preamble);
if (ht_info->vdevid < wma->max_bssid &&
intr[ht_info->vdevid].plink_status_req) {
if (ht_info->tx_nss || ht_info->rx_nss)
link_status = LINK_STATUS_MIMO;
if ((ht_info->tx_preamble == LINK_RATE_VHT) ||
(ht_info->rx_preamble == LINK_RATE_VHT))
link_status |= LINK_STATUS_VHT;
if (intr[ht_info->vdevid].nss == 2)
link_status |= LINK_SUPPORT_MIMO;
if (intr[ht_info->vdevid].rate_flags &
(eHAL_TX_RATE_VHT20 | eHAL_TX_RATE_VHT40 |
eHAL_TX_RATE_VHT80))
link_status |= LINK_SUPPORT_VHT;
wma_post_link_status(intr[ht_info->vdevid].plink_status_req,
link_status);
intr[ht_info->vdevid].plink_status_req = NULL;
link_status = LINK_STATUS_LEGACY;
}
ht_info++;
}
return 0;
}
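/**
* wma_link_status_event_handler() - WMI_UPDATE_VDEV_RATE_STATS_EVENTID handler
* @handle: wma handle
* @cmd_param_info: event buffer from firmware
* @len: length of the event buffer
*
* Validates the number of vdev stats, copies the fixed param and ht info
* and posts them to the WDA message queue as WDA_GET_LINK_STATUS_RSP_IND.
*
* Return: 0 on success, negative value on failure
*/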
static int wma_link_status_event_handler(void *handle, u_int8_t *cmd_param_info,
u_int32_t len)
{
WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *param_buf;
wmi_vdev_rate_stats_event_fixed_param *event;
vos_msg_t vos_msg = {0};
u_int32_t buf_size;
u_int8_t *buf;
param_buf =
(WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *)cmd_param_info;
if (!param_buf) {
WMA_LOGA("%s: Invalid stats event", __func__);
return -EINVAL;
}
event = param_buf->fixed_param;
if (event->num_vdev_stats > ((WMA_SVC_MSG_MAX_SIZE -
sizeof(*event)) / sizeof(wmi_vdev_rate_ht_info))) {
WMA_LOGE("%s: excess vdev_stats buffers:%d", __func__,
event->num_vdev_stats);
VOS_ASSERT(0);
return -EINVAL;
}
buf_size = sizeof(wmi_vdev_rate_stats_event_fixed_param) +
sizeof(wmi_vdev_rate_ht_info) * event->num_vdev_stats;
buf = vos_mem_malloc(buf_size);
if (!buf) {
WMA_LOGE("%s: Failed alloc memory for buf", __func__);
return -ENOMEM;
}
vos_mem_zero(buf, buf_size);
vos_mem_copy(buf, param_buf->fixed_param,
sizeof(wmi_vdev_rate_stats_event_fixed_param));
vos_mem_copy((buf + sizeof(wmi_vdev_rate_stats_event_fixed_param)),
param_buf->ht_info,
sizeof(wmi_vdev_rate_ht_info) * event->num_vdev_stats);
vos_msg.type = WDA_GET_LINK_STATUS_RSP_IND;
vos_msg.bodyptr = buf;
vos_msg.bodyval = 0;
if (VOS_STATUS_SUCCESS !=
vos_mq_post_message(VOS_MQ_ID_WDA, &vos_msg)) {
WMA_LOGP("%s: Failed to post WDA_GET_LINK_STATUS_RSP_IND msg",
__func__);
vos_mem_free(buf);
return -1;
}
WMA_LOGD("posted WDA_GET_LINK_STATUS_RSP_IND");
return 0;
}
/**
* wma_update_mib_stats() - send mib stats to hdd
* @wma_handle: pointer to wma handle.
* @event: mib stats
*
* This API handles the requested Mib stats and calls the callback to
* update hdd
*
* Return: Success or error code
*/
static int wma_update_mib_stats(tp_wma_handle wma_handle,
wmi_mib_stats *event)
{
struct mib_stats_metrics mib_stats;
tpAniSirGlobal mac = (tpAniSirGlobal)vos_get_context(
VOS_MODULE_ID_PE, wma_handle->vos_context);
if (!mac) {
WMA_LOGE("%s: Invalid mac context", __func__);
return -EINVAL;
}
if (!mac->sme.csr_mib_stats_callback) {
WMA_LOGE("%s: Callback not registered", __func__);
return -EINVAL;
}
mib_stats.mib_counters.tx_frags =
event->tx_mpdu_grp_frag_cnt;
mib_stats.mib_counters.group_tx_frames =
event->tx_msdu_grp_frm_cnt;
mib_stats.mib_counters.failed_cnt = event->tx_msdu_fail_cnt;
mib_stats.mib_counters.rx_frags = event->rx_mpdu_frag_cnt;
mib_stats.mib_counters.group_rx_frames =
event->rx_msdu_grp_frm_cnt;
mib_stats.mib_counters.fcs_error_cnt =
event->rx_mpdu_fcs_err;
mib_stats.mib_counters.tx_frames =
event->tx_msdu_frm_cnt;
mib_stats.mib_mac_statistics.retry_cnt =
event->tx_msdu_retry_cnt;
mib_stats.mib_mac_statistics.frame_dup_cnt =
event->rx_frm_dup_cnt;
mib_stats.mib_mac_statistics.rts_success_cnt =
event->tx_rts_success_cnt;
mib_stats.mib_mac_statistics.rts_fail_cnt =
event->tx_rts_fail_cnt;
mib_stats.mib_qos_counters.qos_tx_frag_cnt =
event->tx_Qos_mpdu_grp_frag_cnt;
mib_stats.mib_qos_counters.qos_retry_cnt =
event->tx_Qos_msdu_retry_UP;
mib_stats.mib_qos_counters.qos_failed_cnt = event->tx_Qos_msdu_fail_UP;
mib_stats.mib_qos_counters.qos_frame_dup_cnt =
event->rx_Qos_frm_dup_cnt_UP;