qcacld-2.0: Implement descriptor pool for fw stats

The kernel address is used as a cookie to keep
track of stats requests. This address can be
disclosed to the target, leading to a security
vulnerability.

Implement a FW stats descriptor pool and use a
descriptor ID, instead of the kernel address, to
keep track of stats requests, preventing the
kernel address leak.

Change-Id: Ib49150da899c0b9314f614868a90867f4aa92d3d
CRs-Fixed: 2276007
Alok Kumar authored 2018-08-31 17:00:14 +05:30; committed by L R
parent 4ba2e61837
commit 8c60e92672
7 changed files with 255 additions and 29 deletions
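
At its core the change is a classic fixed-size descriptor pool: descriptors live in a single array, free ones are threaded onto a singly linked freelist, and only the 8-bit array index ever leaves the kernel as the HTT cookie. The response path validates the index and exchanges it back for the pending request. A minimal user-space sketch of the pattern follows, with illustrative names rather than the driver's:

    #include <stdio.h>

    #define POOL_SIZE 10

    struct stats_req { int stats_type; };

    struct desc {
        struct stats_req *req;
        unsigned char id;
    };

    struct elem {
        struct elem *next;
        struct desc d;
    };

    static struct elem pool[POOL_SIZE];
    static struct elem *freelist;

    static void pool_init(void)
    {
        int i;

        for (i = 0; i < POOL_SIZE - 1; i++) {
            pool[i].d.id = (unsigned char)i;
            pool[i].next = &pool[i + 1];
        }
        pool[i].d.id = (unsigned char)i;   /* last element terminates */
        pool[i].next = NULL;
        freelist = &pool[0];
    }

    /* pop a descriptor; its 8-bit id is the cookie sent to the target */
    static struct desc *desc_alloc(struct stats_req *req)
    {
        struct elem *e = freelist;

        if (!e)
            return NULL;                   /* pool exhausted */
        freelist = e->next;
        e->d.req = req;
        return &e->d;
    }

    /* response path: validate the cookie, take the request, recycle */
    static struct stats_req *desc_get_req(unsigned char cookie)
    {
        struct elem *e;
        struct stats_req *req;

        if (cookie >= POOL_SIZE)
            return NULL;                   /* untrusted cookie: reject */
        e = &pool[cookie];
        req = e->d.req;
        e->d.req = NULL;
        e->next = freelist;
        freelist = e;
        return req;
    }

    int main(void)
    {
        struct stats_req my_req = { 42 };
        struct desc *d;

        pool_init();
        d = desc_alloc(&my_req);
        printf("cookie: %u -> stats_type: %d\n",
               d->id, desc_get_req(d->id)->stats_type);
        return 0;
    }

A side effect of the design is that the pool caps the number of in-flight stats requests at the pool size; once all ten descriptors are outstanding, allocation returns NULL and the request fails with A_ERROR.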


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -545,7 +545,7 @@ htt_h2t_dbg_stats_get(
     u_int32_t stats_type_reset_mask,
     u_int8_t cfg_stat_type,
     u_int32_t cfg_val,
-    u_int64_t cookie)
+    u_int8_t cookie)
 {
     struct htt_htc_pkt *pkt;
     adf_nbuf_t msg;
@@ -607,11 +607,11 @@ htt_h2t_dbg_stats_get(
     /* cookie LSBs */
     msg_word++;
-    *msg_word = cookie & 0xffffffff;
+    *msg_word = cookie;
     /* cookie MSBs */
     msg_word++;
-    *msg_word = cookie >> 32;
+    *msg_word = 0;
     SET_HTC_PACKET_INFO_TX(
         &pkt->htc_pkt,
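
The wire format is untouched here: the stats request still carries two 32-bit cookie words, and the T2H completion echoes them back. The patch only changes what goes into them; in effect (an illustrative helper, not a function in the driver):

    #include <stdint.h>

    /* Illustrative only: what the cookie words carry after the patch. */
    static void pack_stats_cookie(uint32_t *msg_word, uint8_t desc_id)
    {
        msg_word[0] = desc_id; /* cookie LSBs: pool index, 0..pool_size-1 */
        msg_word[1] = 0;       /* cookie MSBs: no longer pointer bits */
    }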


@@ -371,11 +371,10 @@ htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
 #if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF
     case HTT_T2H_MSG_TYPE_STATS_CONF:
         {
-            u_int64_t cookie;
+            u_int8_t cookie;
             u_int8_t *stats_info_list;
             cookie = *(msg_word + 1);
-            cookie |= ((u_int64_t) (*(msg_word + 2))) << 32;
             stats_info_list = (u_int8_t *) (msg_word + 3);
             htc_pm_runtime_put(pdev->htc_pdev);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -361,6 +361,7 @@ ol_txrx_pdev_attach(
     }
     TXRX_STATS_INIT(pdev);
+    ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);
     TAILQ_INIT(&pdev->vdev_list);
     TAILQ_INIT(&pdev->req_list);
@@ -856,6 +857,7 @@ htt_attach_fail:
     ol_txrx_peer_find_detach(pdev);
 peer_find_attach_fail:
+    ol_txrx_fw_stats_desc_pool_deinit(pdev);
     adf_os_mem_free(pdev);
 ol_attach_fail:
@@ -979,6 +981,7 @@ ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
     htt_detach(pdev->htt_pdev);
     ol_txrx_peer_find_detach(pdev);
+    ol_txrx_fw_stats_desc_pool_deinit(pdev);
     adf_os_spinlock_destroy(&pdev->tx_mutex);
     adf_os_spinlock_destroy(&pdev->peer_ref_mutex);
@@ -2025,7 +2028,7 @@ ol_txrx_fw_stats_cfg(
     u_int8_t cfg_stats_type,
     u_int32_t cfg_val)
 {
-    u_int64_t dummy_cookie = 0;
+    u_int8_t dummy_cookie = 0;
     htt_h2t_dbg_stats_get(
         vdev->pdev->htt_pdev,
         0 /* upload mask */,
@@ -2042,8 +2045,10 @@ ol_txrx_fw_stats_get(
     bool response_expected)
 {
     struct ol_txrx_pdev_t *pdev = vdev->pdev;
-    u_int64_t cookie;
+    uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
     struct ol_txrx_stats_req_internal *non_volatile_req;
+    struct ol_txrx_fw_stats_desc_t *desc = NULL;
+    struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
     if (!pdev ||
         req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
@@ -2057,7 +2062,7 @@ ol_txrx_fw_stats_get(
      * (The one provided as an argument is likely allocated on the stack.)
      */
     non_volatile_req = adf_os_mem_alloc(pdev->osdev, sizeof(*non_volatile_req));
-    if (! non_volatile_req) {
+    if (!non_volatile_req) {
         return A_NO_MEMORY;
     }
     /* copy the caller's specifications */
@@ -2065,13 +2070,20 @@ ol_txrx_fw_stats_get(
     non_volatile_req->serviced = 0;
     non_volatile_req->offset = 0;
-    /* use the non-volatile request object's address as the cookie */
-    cookie = OL_TXRX_STATS_PTR_TO_U64(non_volatile_req);
-    adf_os_spin_lock_bh(&pdev->req_list_spinlock);
-    TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
-    pdev->req_list_depth++;
-    adf_os_spin_unlock_bh(&pdev->req_list_spinlock);
+    if (response_expected) {
+        desc = ol_txrx_fw_stats_desc_alloc(pdev);
+        if (!desc) {
+            vos_mem_free(non_volatile_req);
+            return A_ERROR;
+        }
+        /* use the desc id as the cookie */
+        cookie = desc->desc_id;
+        desc->req = non_volatile_req;
+        adf_os_spin_lock_bh(&pdev->req_list_spinlock);
+        TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
+        pdev->req_list_depth++;
+        adf_os_spin_unlock_bh(&pdev->req_list_spinlock);
+    }
     if (htt_h2t_dbg_stats_get(
         pdev->htt_pdev,
@@ -2080,10 +2092,31 @@ ol_txrx_fw_stats_get(
         HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
         cookie))
     {
-        adf_os_spin_lock_bh(&pdev->req_list_spinlock);
-        TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
-        pdev->req_list_depth--;
-        adf_os_spin_unlock_bh(&pdev->req_list_spinlock);
+        if (response_expected) {
+            adf_os_spin_lock_bh(&pdev->req_list_spinlock);
+            TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
+                         req_list_elem);
+            pdev->req_list_depth--;
+            adf_os_spin_unlock_bh(&pdev->req_list_spinlock);
+            if (desc) {
+                adf_os_spin_lock_bh(
+                    &pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+                desc->req = NULL;
+                elem = container_of(desc,
+                                    struct ol_txrx_fw_stats_desc_elem_t,
+                                    desc);
+                elem->next =
+                    pdev->ol_txrx_fw_stats_desc_pool.freelist;
+                pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
+                adf_os_spin_unlock_bh(
+                    &pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+            }
+        }
         adf_os_mem_free(non_volatile_req);
         return A_ERROR;
@@ -2095,10 +2128,161 @@ ol_txrx_fw_stats_get(
     return A_OK;
 }
 #endif
+
+/**
+ * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ * @pool_size: size of the fw stats descriptor pool
+ *
+ * Return: 0 for success, error code on failure.
+ */
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+                                    uint8_t pool_size)
+{
+    int i;
+
+    if (!pdev) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: pdev is NULL", __func__);
+        return -EINVAL;
+    }
+    pdev->ol_txrx_fw_stats_desc_pool.pool = vos_mem_malloc(pool_size *
+        sizeof(struct ol_txrx_fw_stats_desc_elem_t));
+    if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: failed to allocate desc pool", __func__);
+        return -ENOMEM;
+    }
+    pdev->ol_txrx_fw_stats_desc_pool.freelist =
+        &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
+    pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
+
+    for (i = 0; i < (pool_size - 1); i++) {
+        pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+        pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+        pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
+            &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
+    }
+    pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+    pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+    pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
+    adf_os_spinlock_init(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+    adf_os_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
+    adf_os_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
+    return 0;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
+ * fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: None
+ */
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
+{
+    if (!pdev) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: pdev is NULL", __func__);
+        return;
+    }
+    if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: Pool is not initialized", __func__);
+        return;
+    }
+    if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: Pool is not allocated", __func__);
+        return;
+    }
+    adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+    adf_os_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
+    vos_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
+    pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
+    pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
+    pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
+    adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+}
+
+/**
+ * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
+ * free descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: pointer to fw stats descriptor, NULL on failure
+ */
+struct ol_txrx_fw_stats_desc_t
+*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
+{
+    struct ol_txrx_fw_stats_desc_t *desc = NULL;
+
+    adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+    if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+        adf_os_spin_unlock_bh(
+            &pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: Pool deinitialized", __func__);
+        return NULL;
+    }
+    if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
+        desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
+        pdev->ol_txrx_fw_stats_desc_pool.freelist =
+            pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
+    }
+    adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+
+    if (desc) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
+                   "%s: desc_id %d allocated",
+                   __func__, desc->desc_id);
+    } else {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: fw stats descriptors are exhausted", __func__);
+    }
+    return desc;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_get_req() - Get the request tied to a descriptor
+ * and return that descriptor to the free pool
+ * @pdev: handle to ol txrx pdev
+ * @desc_id: descriptor ID
+ *
+ * Return: pointer to request
+ */
+struct ol_txrx_stats_req_internal
+*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+                               unsigned char desc_id)
+{
+    struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
+    struct ol_txrx_stats_req_internal *req;
+
+    adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+    if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+        adf_os_spin_unlock_bh(
+            &pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: Desc ID %u Pool deinitialized",
+                   __func__, desc_id);
+        return NULL;
+    }
+    desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
+    req = desc_elem->desc.req;
+    desc_elem->desc.req = NULL;
+    desc_elem->next = pdev->ol_txrx_fw_stats_desc_pool.freelist;
+    pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
+    adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+    return req;
+}
+
 void
 ol_txrx_fw_stats_handler(
     ol_txrx_pdev_handle pdev,
-    u_int64_t cookie,
+    u_int8_t cookie,
     u_int8_t *stats_info_list)
 {
     enum htt_dbg_stats_type type;
@@ -2109,7 +2293,18 @@ ol_txrx_fw_stats_handler(
     int more = 0;
     int found = 0;
-    req = OL_TXRX_U64_TO_STATS_PTR(cookie);
+    if (cookie >= FW_STATS_DESC_POOL_SIZE) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Cookie is not valid",
+                   __func__);
+        return;
+    }
+    req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
+    if (!req) {
+        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+                   "%s: Request not retrieved for cookie %u", __func__,
+                   (uint8_t)cookie);
+        return;
+    }
     adf_os_spin_lock_bh(&pdev->req_list_spinlock);
     TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
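
Besides hiding kernel pointers, the pool also gives teardown a safe shape: every pool operation re-checks the initialized atomic under pool_lock, and deinit clears the flag and frees the array under that same lock, so a STATS_CONF response racing with pdev detach bails out instead of touching freed memory. The pattern, reduced to pthreads (illustrative; the driver uses the adf_os primitives, not this API):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static int initialized;     /* stands in for the adf_os_atomic_t flag */
    static void *pool;

    void pool_deinit(void)
    {
        pthread_mutex_lock(&pool_lock);
        initialized = 0;        /* late callers will now bail out */
        free(pool);
        pool = NULL;
        pthread_mutex_unlock(&pool_lock);
    }

    void *pool_op(void)
    {
        void *slot = NULL;

        pthread_mutex_lock(&pool_lock);
        if (initialized)        /* re-check under the lock */
            slot = pool;        /* ...then index into the pool here */
        pthread_mutex_unlock(&pool_lock);
        return slot;
    }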


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2017-2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -51,6 +51,10 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
 #define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
 #endif
+
+#ifndef FW_STATS_DESC_POOL_SIZE
+#define FW_STATS_DESC_POOL_SIZE 10
+#endif
+
 #ifdef CONFIG_PER_VDEV_TX_DESC_POOL
 #define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
 #define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
@@ -59,4 +63,14 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
 #ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE
 #define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20
 #endif
+
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+                                    uint8_t pool_size);
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev);
+struct ol_txrx_fw_stats_desc_t
+*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev);
+struct ol_txrx_stats_req_internal
+*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+                               uint8_t desc_id);
+
 #endif /* _OL_TXRX__H_ */


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -432,6 +432,16 @@ struct ol_tx_group_credit_stats_t {
     u_int16_t wrap_around;
 };
+
+struct ol_txrx_fw_stats_desc_t {
+    struct ol_txrx_stats_req_internal *req;
+    unsigned char desc_id;
+};
+
+struct ol_txrx_fw_stats_desc_elem_t {
+    struct ol_txrx_fw_stats_desc_elem_t *next;
+    struct ol_txrx_fw_stats_desc_t desc;
+};
+
 /*
  * As depicted in the diagram below, the pdev contains an array of
  * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
@@ -537,6 +547,14 @@ struct ol_txrx_pdev_t {
     adf_os_atomic_t target_tx_credit;
     adf_os_atomic_t orig_target_tx_credit;
+
+    struct {
+        uint16_t pool_size;
+        struct ol_txrx_fw_stats_desc_elem_t *pool;
+        struct ol_txrx_fw_stats_desc_elem_t *freelist;
+        adf_os_spinlock_t pool_lock;
+        adf_os_atomic_t initialized;
+    } ol_txrx_fw_stats_desc_pool;
+
     /* Peer mac address to staid mapping */
     struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];
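
One subtlety in these types: ol_txrx_fw_stats_desc_t is embedded in ol_txrx_fw_stats_desc_elem_t after the next pointer, so a desc pointer cannot simply be cast back to its element; the error path in ol_txrx_fw_stats_get() therefore uses container_of to recover the enclosing element before pushing it onto the freelist. A standalone illustration of why the member offset matters:

    #include <stdio.h>
    #include <stddef.h>

    /* container_of as used in the kernel: subtract the member offset */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct desc { int id; };

    struct elem {
        struct elem *next; /* desc is not the first member... */
        struct desc d;     /* ...so casting &d to struct elem * is wrong */
    };

    int main(void)
    {
        struct elem e = { NULL, { 7 } };
        struct elem *back = container_of(&e.d, struct elem, d);

        printf("recovered: %p, expected: %p\n", (void *)back, (void *)&e);
        return 0;
    }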


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014, 2016, 2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -170,7 +170,7 @@ htt_h2t_dbg_stats_get(
     u_int32_t stats_type_reset_mask,
     u_int8_t cfg_stats_type,
     u_int32_t cfg_val,
-    u_int64_t cookie);
+    u_int8_t cookie);
 /**
  * @brief Get the fields from HTT T2H stats upload message's stats info header


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, 2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -643,7 +643,7 @@ ol_rx_pn_ind_handler(
 void
 ol_txrx_fw_stats_handler(
     ol_txrx_pdev_handle pdev,
-    u_int64_t cookie,
+    u_int8_t cookie,
     u_int8_t *stats_info_list);
 /**