msm: rtb: Fix buffer corruption issue

Consider the case of a nentries==8 and 3 cpus.
Numbers in parenthesis are the equivalent location in the circular buffer.
CPU:   Index0:  Index1: Index2: Index3:
0      0        3       6       9(1)
1      1        4       7       10(2)
2      2        5       8(0)

The current design is only appropriate for the case where
nentries % nrcpus == 0.

Fix this issue by incrementing the index by (nentries % nrcpus)
each time the circular buffer wraps around.

CPU:   Index0:  Index1: Index2:
0      0        3       6+2==8(0)
1      1        4       7+2==9(1)
2      2        5       8+2==10(2)

Change-Id: I4f96eb4c971cc18357e145dabcf4272e466dcda2
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
This commit is contained in:
Patrick Daly 2015-02-08 23:18:40 -08:00
parent 0b9ec111f5
commit 07f726d4ba
1 changed file with 22 additions and 5 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -160,10 +160,20 @@ static void uncached_logk_timestamp(int idx)
}
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
/*
* Since it is not necessarily true that nentries % step_size == 0,
* must make appropriate adjustments to the index when a "wraparound"
* occurs to ensure that msm_rtb.rtb[x] always belongs to the same cpu.
* It is desired to give all cpus the same number of entries; this leaves
* (nentries % step_size) dead space at the end of the buffer.
*/
static int msm_rtb_get_idx(void)
{
int cpu, i, offset;
atomic_t *index;
unsigned long flags;
u32 unused_buffer_size = msm_rtb.nentries % msm_rtb.step_size;
int adjusted_size;
/*
* ideally we would use get_cpu but this is a close enough
@@ -173,17 +183,24 @@ static int msm_rtb_get_idx(void)
index = &per_cpu(msm_rtb_idx_cpu, cpu);
local_irq_save(flags);
i = atomic_add_return(msm_rtb.step_size, index);
i -= msm_rtb.step_size;
/* Check if index has wrapped around */
offset = (i & (msm_rtb.nentries - 1)) -
((i - msm_rtb.step_size) & (msm_rtb.nentries - 1));
/*
* Check if index has wrapped around or is in the unused region at the
* end of the buffer
*/
adjusted_size = atomic_read(index) + unused_buffer_size;
offset = (adjusted_size & (msm_rtb.nentries - 1)) -
((adjusted_size - msm_rtb.step_size) & (msm_rtb.nentries - 1));
if (offset < 0) {
uncached_logk_timestamp(i);
i = atomic_add_return(msm_rtb.step_size, index);
i = atomic_add_return(msm_rtb.step_size + unused_buffer_size,
index);
i -= msm_rtb.step_size;
}
local_irq_restore(flags);
return i;
}