mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-01 02:21:16 +00:00
9b4dadbfa9
[ Upstream commit 31b265b3baaf55f209229888b7ffea523ddab366 ] As reported back in 2016-11 [1], the "ftdump" kdb command triggers a BUG for "sleeping function called from invalid context". kdb's "ftdump" command wants to call ring_buffer_read_prepare() in atomic context. A very simple solution for this is to add allocation flags to ring_buffer_read_prepare() so kdb can call it without triggering the allocation error. This patch does that. Note that in the original email thread about this, it was suggested that perhaps the solution for kdb was to either preallocate the buffer ahead of time or create our own iterator. I'm hoping that this alternative of adding allocation flags to ring_buffer_read_prepare() can be considered since it means I don't need to duplicate more of the core trace code into "trace_kdb.c" (for either creating my own iterator or re-preparing a ring allocator whose memory was already allocated). NOTE: another option for kdb is to actually figure out how to make it reuse the existing ftrace_dump() function and totally eliminate the duplication. This sounds very appealing and actually works (the "sr z" command can be seen to properly dump the ftrace buffer). The downside here is that ftrace_dump() fully consumes the trace buffer. Unless that is changed I'd rather not use it because it means "ftdump | grep xyz" won't be very useful to search the ftrace buffer since it will throw away the whole trace on the first grep. A future patch to dump only the last few lines of the buffer will also be hard to implement. [1] https://lkml.kernel.org/r/20161117191605.GA21459@google.com Link: http://lkml.kernel.org/r/20190308193205.213659-1-dianders@chromium.org Reported-by: Brian Norris <briannorris@chromium.org> Signed-off-by: Douglas Anderson <dianders@chromium.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
137 lines
3.1 KiB
C
137 lines
3.1 KiB
C
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
|
|
#include <linux/init.h>
|
|
#include <linux/kgdb.h>
|
|
#include <linux/kdb.h>
|
|
#include <linux/ftrace.h>
|
|
|
|
#include "trace.h"
|
|
#include "trace_output.h"
|
|
|
|
/*
 * ftrace_dump_buf - dump the ftrace ring buffer to the kdb console
 * @skip_lines:	number of leading trace lines to swallow before printing
 * @cpu_file:	a single cpu number, or RING_BUFFER_ALL_CPUS for every cpu
 *
 * Called from the kdb "ftdump" command, i.e. in atomic debugger context,
 * which is why the ring buffer iterators are prepared with GFP_ATOMIC
 * (sleeping allocations would trigger a "sleeping function called from
 * invalid context" BUG here).
 */
static void ftrace_dump_buf(int skip_lines, long cpu_file)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	trace_init_global_iter(&iter);

	/* Keep new events from being recorded while we walk the buffer. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = trace_flags;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}
	if (!trace_empty(&iter))
		trace_find_next_entry_inc(&iter);
	while (!trace_empty(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		/* Advance first; only render once skip_lines is used up. */
		if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines)
			print_trace_line(&iter);
		if (!skip_lines)
			trace_printk_seq(&iter.seq);
		else
			skip_lines--;
		/* Let the operator interrupt a long dump. */
		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf(" (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	/* Restore flags and re-enable recording before freeing iterators. */
	trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	for_each_tracing_cpu(cpu)
		if (iter.buffer_iter[cpu])
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
}
|
|
|
|
/*
|
|
* kdb_ftdump - Dump the ftrace log buffer
|
|
*/
|
|
static int kdb_ftdump(int argc, const char **argv)
|
|
{
|
|
int skip_lines = 0;
|
|
long cpu_file;
|
|
char *cp;
|
|
|
|
if (argc > 2)
|
|
return KDB_ARGCOUNT;
|
|
|
|
if (argc) {
|
|
skip_lines = simple_strtol(argv[1], &cp, 0);
|
|
if (*cp)
|
|
skip_lines = 0;
|
|
}
|
|
|
|
if (argc == 2) {
|
|
cpu_file = simple_strtol(argv[2], &cp, 0);
|
|
if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
|
|
!cpu_online(cpu_file))
|
|
return KDB_BADINT;
|
|
} else {
|
|
cpu_file = RING_BUFFER_ALL_CPUS;
|
|
}
|
|
|
|
kdb_trap_printk++;
|
|
ftrace_dump_buf(skip_lines, cpu_file);
|
|
kdb_trap_printk--;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static __init int kdb_ftrace_register(void)
|
|
{
|
|
kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
|
|
"Dump ftrace log", 0, KDB_REPEAT_NONE);
|
|
return 0;
|
|
}
|
|
|
|
late_initcall(kdb_ftrace_register);
|