soc: qcom: smem: validate fields of shared structures

Structures in shared memory that can be modified by remote
processors may hold untrusted values; they should be validated
before use.

Add proper validation before using fields of shared
structures.
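
The pattern used throughout the patch is to snapshot each remotely
writable field into a local variable, bounds-check the local copy, and
then use only that copy. A minimal, hypothetical sketch of the idea
(demo_hdr, demo_get_entry, base and region_size are made-up names, not
the smem structures touched below):

	/* Illustrative sketch only -- not the real smem structures. */
	struct demo_hdr {
		uint32_t size;	/* field a remote processor can rewrite at any time */
	};

	static void *demo_get_entry(struct demo_hdr *hdr, void *base,
				    uint32_t region_size)
	{
		uint32_t size = hdr->size;	/* read the untrusted field exactly once */

		if (WARN_ON(size > region_size))	/* reject out-of-range values */
			return NULL;

		return base + size;	/* only the validated local copy is used */
	}

Re-reading hdr->size after the check would reopen the window for a
remote processor to swap in a bad value, which is why the hunks below
also replace repeated hdr->... reads with local snapshots.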

CRs-Fixed: 2421602
Change-Id: I947ed5b0fe5705e5223d75b0ea8aafb36113ca5a
Signed-off-by: Deepak Kumar Singh <deesin@codeaurora.org>
Deepak Kumar Singh 2019-05-29 16:07:46 +05:30 committed by syphyr
parent fa4e1d8f35
commit 9a89fcb7d3
1 changed file with 128 additions and 42 deletions


@@ -1,4 +1,4 @@
- /* Copyright (c) 2013-2015,2017 The Linux Foundation. All rights reserved.
+ /* Copyright (c) 2013-2015,2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -178,6 +178,20 @@ static struct restart_notifier_block restart_notifiers[] = {
static int init_smem_remote_spinlock(void);
+ /**
+ * smem_get_toc() - Used for getting partitions TOC
+ *
+ * @return - Base address of partitions TOC
+ *
+ * Helper function to get base address of partition TOC,
+ * that is present in top 4K of first smem region.
+ */
+ static struct smem_toc __iomem *smem_get_toc(void)
+ {
+ return smem_areas[0].virt_addr +
+ smem_areas[0].size - 4 * 1024;
+ }
/**
* is_probe_done() - Did the probe function successfully complete
*
@@ -312,6 +326,7 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
int use_spinlocks = spinlocks_initialized && use_rspinlock;
void *ret = 0;
unsigned long flags = 0;
+ uint32_t e_size;
if (!skip_init_check && !smem_initialized_check())
return ret;
@@ -325,7 +340,11 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
if (toc[id].allocated) {
phys_addr_t phys_base;
- *size = toc[id].size;
+ e_size = toc[id].size;
+ if (e_size > smem_ram_size)
+ return ret;
+ *size = e_size;
barrier();
phys_base = toc[id].reserved & BASE_ADDR_MASK;
@@ -360,12 +379,19 @@ static void *__smem_get_entry_secure(unsigned id,
bool skip_init_check,
bool use_rspinlock)
{
- struct smem_partition_header *hdr;
- unsigned long lflags = 0;
- void *item = NULL;
struct smem_partition_allocation_header *alloc_hdr;
+ struct smem_partition_header *hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ unsigned long lflags = 0;
+ uint32_t partition_size;
uint32_t partition_num;
+ uint32_t padding_data;
+ uint32_t padding_hdr;
uint32_t a_hdr_size;
+ uint32_t item_size;
+ void *item = NULL;
int rc;
SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
@@ -388,8 +414,10 @@ static void *__smem_get_entry_secure(unsigned id,
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
return __smem_get_entry_nonsecure(id, size, skip_init_check,
use_rspinlock);
+ toc = smem_get_toc();
partition_num = partitions[to_proc].partition_num;
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
if (unlikely(!spinlocks_initialized)) {
rc = init_smem_remote_spinlock();
@@ -415,11 +443,20 @@ static void *__smem_get_entry_secure(unsigned id,
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr),
partitions[to_proc].size_cacheline);
- for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+ offset_free_cached = hdr->offset_free_cached;
+ if (WARN_ON(offset_free_cached > partition_size))
+ return NULL;
+ for (alloc_hdr = (void *)(hdr) + partition_size - a_hdr_size;
(void *)(alloc_hdr) > (void *)(hdr) +
- hdr->offset_free_cached;
+ offset_free_cached;
alloc_hdr = (void *)(alloc_hdr) -
- alloc_hdr->size - a_hdr_size) {
+ item_size - a_hdr_size) {
+ item_size = alloc_hdr->size;
+ padding_data = alloc_hdr->padding_data;
+ if (WARN_ON(padding_data > item_size
+ || item_size > partition_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -432,20 +469,30 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
- item = (void *)(alloc_hdr) - alloc_hdr->size;
+ *size = ALIGN(item_size - padding_data, 8);
+ item = (void *)(alloc_hdr) - item_size;
break;
}
}
} else {
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > partition_size))
+ return NULL;
for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
(void *)(alloc_hdr) < (void *)(hdr) +
- hdr->offset_free_uncached;
+ offset_free_uncached;
alloc_hdr = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr +
- alloc_hdr->size) {
+ padding_hdr +
+ item_size) {
+ padding_hdr = alloc_hdr->padding_hdr;
+ padding_data = alloc_hdr->padding_data;
+ item_size = alloc_hdr->size;
+ if (WARN_ON(padding_hdr > partition_size
+ || item_size > partition_size
+ || padding_data > item_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -458,11 +505,10 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
+ *size = ALIGN(item_size - padding_data, 8);
item = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr;
+ padding_hdr;
break;
}
}
@@ -551,10 +597,17 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
void *smem_base = smem_ram_base;
struct smem_shared *shared = smem_base;
struct smem_heap_entry *toc = shared->heap_toc;
+ uint32_t free_offset, heap_remaining;
void *ret = NULL;
- if (shared->heap_info.heap_remaining >= size_in) {
- toc[id].offset = shared->heap_info.free_offset;
+ heap_remaining = shared->heap_info.heap_remaining;
+ free_offset = shared->heap_info.free_offset;
+ if (WARN_ON(heap_remaining > smem_ram_size
+ || free_offset > smem_ram_size))
+ return NULL;
+ if (heap_remaining >= size_in) {
+ toc[id].offset = free_offset;
toc[id].size = size_in;
/*
* wmb() is necessary to ensure the allocation data is
@@ -566,7 +619,7 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
shared->heap_info.free_offset += size_in;
shared->heap_info.heap_remaining -= size_in;
- ret = smem_base + toc[id].offset;
+ ret = smem_base + free_offset;
/*
* wmb() is necessary to ensure the heap data is consistent
* before continuing to prevent race conditions with remote
@@ -602,11 +655,15 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
void *smem_base = smem_ram_base;
struct smem_partition_header *hdr;
struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t partition_size;
+ uint32_t partition_num;
uint32_t a_hdr_size;
uint32_t a_data_size;
uint32_t size_cacheline;
uint32_t free_space;
- uint32_t partition_num;
void *ret = NULL;
hdr = smem_base + partitions[to_proc].offset;
@@ -623,27 +680,36 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
}
size_cacheline = partitions[to_proc].size_cacheline;
- free_space = hdr->offset_free_cached -
- hdr->offset_free_uncached;
+ toc = smem_get_toc();
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > partition_size))
+ return NULL;
+ free_space = offset_free_cached - offset_free_uncached;
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
a_data_size = ALIGN(size_in, size_cacheline);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
"%s: id %u not enough memory %u (required %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size);
"%s: id %u not enough memory %u (required %u), (size_in %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
- a_hdr_size;
+ alloc_hdr = (void *)(hdr) + offset_free_cached - a_hdr_size;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_cached = hdr->offset_free_cached -
+ hdr->offset_free_cached = offset_free_cached -
a_hdr_size - a_data_size;
ret = (void *)(alloc_hdr) - a_data_size;
/*
@@ -658,20 +724,21 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
} else {
a_hdr_size = sizeof(*alloc_hdr);
a_data_size = ALIGN(size_in, 8);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
"%s: id %u not enough memory %u (required %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size);
"%s: id %u not enough memory %u (required %u) (size_in %u)\n",
__func__, id, free_space,
a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+ alloc_hdr = (void *)(hdr) + offset_free_uncached;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_uncached = hdr->offset_free_uncached +
+ hdr->offset_free_uncached = offset_free_uncached +
a_hdr_size + a_data_size;
ret = alloc_hdr + 1;
}
@@ -856,6 +923,12 @@ unsigned smem_get_free_space(unsigned to_proc)
{
struct smem_partition_header *hdr;
struct smem_shared *shared;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t heap_remaining;
+ uint32_t p_size;
+ uint32_t p_num;
if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
@@ -870,11 +943,24 @@ unsigned smem_get_free_space(unsigned to_proc)
return UINT_MAX;
}
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
- return hdr->offset_free_cached - hdr->offset_free_uncached;
- } else {
- shared = smem_ram_base;
- return shared->heap_info.heap_remaining;
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+ toc = smem_get_toc();
+ p_num = partitions[to_proc].partition_num;
+ p_size = readl_relaxed(&toc->entry[p_num].size);
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > p_size))
+ return -EINVAL;
+ return offset_free_cached - offset_free_uncached;
}
+ shared = smem_ram_base;
+ heap_remaining = shared->heap_info.heap_remaining;
+ if (WARN_ON(heap_remaining > smem_ram_size))
+ return -EINVAL;
+ return heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
@@ -1158,8 +1244,8 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
LOG_ERR("Smem partition %d hdr magic is bad\n", num);
BUG();
}
- if (!hdr->size) {
- LOG_ERR("Smem partition %d size is 0\n", num);
+ if (hdr->size != entry->size) {
+ LOG_ERR("Smem partition %d size is invalid\n", num);
BUG();
}
if (hdr->offset_free_uncached > hdr->size) {