mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-09-21 20:04:01 +00:00
tracing: Fix circular deadlock in stack trace
When we cat <debugfs>/tracing/stack_trace, we may cause a circular lock: sys_read() t_start() arch_spin_lock(&max_stack_lock); t_show() seq_printf(), vsnprintf() .... /* they are all trace-able; when they are traced, max_stack_lock may be required again. */ The following script can trigger this circular deadlock very easily: #!/bin/bash echo 1 > /proc/sys/kernel/stack_tracer_enabled mount -t debugfs xxx /mnt > /dev/null 2>&1 ( # make check_stack() zealous to require max_stack_lock for ((; ;)) { echo 1 > /mnt/tracing/stack_max_size } ) & for ((; ;)) { cat /mnt/tracing/stack_trace > /dev/null } To fix this bug, we increase the percpu trace_active before acquiring the lock. Reported-by: Li Zefan <lizf@cn.fujitsu.com> Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> LKML-Reference: <4B67D4F9.9080905@cn.fujitsu.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
ab658321f3
commit
4f48f8b7fd
|
@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
|
||||||
unsigned long val, flags;
|
unsigned long val, flags;
|
||||||
char buf[64];
|
char buf[64];
|
||||||
int ret;
|
int ret;
|
||||||
|
int cpu;
|
||||||
|
|
||||||
if (count >= sizeof(buf))
|
if (count >= sizeof(buf))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* In case we trace inside arch_spin_lock() or after (NMI),
|
||||||
|
* we will cause circular lock, so we also need to increase
|
||||||
|
* the percpu trace_active here.
|
||||||
|
*/
|
||||||
|
cpu = smp_processor_id();
|
||||||
|
per_cpu(trace_active, cpu)++;
|
||||||
|
|
||||||
arch_spin_lock(&max_stack_lock);
|
arch_spin_lock(&max_stack_lock);
|
||||||
*ptr = val;
|
*ptr = val;
|
||||||
arch_spin_unlock(&max_stack_lock);
|
arch_spin_unlock(&max_stack_lock);
|
||||||
|
|
||||||
|
per_cpu(trace_active, cpu)--;
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
|
||||||
return count;
|
return count;
|
||||||
|
@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
|
||||||
|
|
||||||
static void *t_start(struct seq_file *m, loff_t *pos)
|
static void *t_start(struct seq_file *m, loff_t *pos)
|
||||||
{
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
|
|
||||||
|
cpu = smp_processor_id();
|
||||||
|
per_cpu(trace_active, cpu)++;
|
||||||
|
|
||||||
arch_spin_lock(&max_stack_lock);
|
arch_spin_lock(&max_stack_lock);
|
||||||
|
|
||||||
if (*pos == 0)
|
if (*pos == 0)
|
||||||
|
@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
|
||||||
|
|
||||||
static void t_stop(struct seq_file *m, void *p)
|
static void t_stop(struct seq_file *m, void *p)
|
||||||
{
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
arch_spin_unlock(&max_stack_lock);
|
arch_spin_unlock(&max_stack_lock);
|
||||||
|
|
||||||
|
cpu = smp_processor_id();
|
||||||
|
per_cpu(trace_active, cpu)--;
|
||||||
|
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue