Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
Merge branch 'perf/urgent' into perf/core

Merge reason: Pick up pending fixes before applying dependent new changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 2aa61274ef
13 changed files with 194 additions and 88 deletions
arch/x86/kernel/cpu/perf_event.c

@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		/*
 		 * event overflow
 		 */
-		handled = 1;
+		handled++;
 		data.period = event->hw.last_period;

 		if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }

+struct pmu_nmi_state {
+	unsigned int	marked;
+	int		handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;

 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}

-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled = handled;
+	}

 	return NOTIFY_STOP;
 }
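The marked/handled bookkeeping above is subtle, so here is a minimal user-space model of the same state machine. Everything in it is an illustrative stand-in, not a kernel API: an NMI that handles more than one counter marks the next NMI number, and if that next NMI then finds nothing to do it is swallowed instead of being reported as unknown.

/* build: cc -o nmi-model nmi-model.c */
#include <stdio.h>

struct pmu_nmi_state {
    unsigned int marked;  /* nmi_count at which an empty NMI is expected */
    int handled;          /* how many counters the previous NMI handled */
};

static struct pmu_nmi_state pmu_state;
static unsigned int nmi_count;

/* Stand-in for x86_pmu.handle_irq(): returns counters handled. */
static int handle_pmu(int overflows)
{
    return overflows;
}

/* Mirrors the perf_event_nmi_handler() flow shown above. */
static void nmi(int overflows)
{
    int handled;

    nmi_count++;
    handled = handle_pmu(overflows);
    if (!handled) {
        if (nmi_count == pmu_state.marked)
            printf("NMI %u: empty but marked -> swallowed\n", nmi_count);
        else
            printf("NMI %u: unknown NMI -> kernel warning\n", nmi_count);
        return;
    }
    if (handled > 1 ||
        (pmu_state.marked == nmi_count && pmu_state.handled > 1)) {
        /* mark the next NMI as a potential back-to-back NMI */
        pmu_state.marked = nmi_count + 1;
        pmu_state.handled = handled;
    }
    printf("NMI %u: handled %d counter(s)\n", nmi_count, handled);
}

int main(void)
{
    /* Two events overflow together: the first NMI handles both, the
     * second arrives with nothing left to do and must be swallowed. */
    nmi(2);
    nmi(0);
    /* A lone unknown NMI is still reported. */
    nmi(0);
    return 0;
}

Running it shows the second, empty NMI being dropped while a genuinely unknown NMI still reaches the warning path.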
arch/x86/kernel/cpu/perf_event_intel.c

@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
-	u64 ack, status;
+	u64 status;
+	int handled = 0;

 	perf_sample_data_init(&data, 0);

@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

 	loops = 0;
 again:
+	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();

@@ -736,19 +738,22 @@ again:
 	}

 	inc_irq_stat(apic_perf_irqs);
-	ack = status;

 	intel_pmu_lbr_read();

 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}

 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];

+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;

@@ -761,8 +766,6 @@ again:
 			x86_pmu_stop(event);
 	}

-	intel_pmu_ack_status(ack);
-
 	/*
 	 * Repeat if there is more work to be done:
 	 */

@@ -772,7 +775,7 @@ again:

 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }

 static struct event_constraint *
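The intel_pmu_handle_irq() change moves the handler from returning a hard-coded 1 to returning a count of the work it actually found, which is what the new NMI bookkeeping in perf_event.c consumes. Below is a rough user-space sketch of that counting pattern; test_and_clear_bit() here is a plain C stand-in for the kernel's __test_and_clear_bit(), and the bit layout is illustrative.

#include <stdio.h>

#define PEBS_BIT 62

static int test_and_clear_bit(int nr, unsigned long long *word)
{
    unsigned long long mask = 1ULL << nr;
    int was_set = !!(*word & mask);

    *word &= ~mask;
    return was_set;
}

static int handle_irq(unsigned long long status)
{
    int handled = 0;
    int bit;

    if (test_and_clear_bit(PEBS_BIT, &status))
        handled++;  /* a PEBS overflow counts as handled work */

    for (bit = 0; bit < 64; bit++) {
        if (test_and_clear_bit(bit, &status))
            handled++;  /* one overflowed counter per set bit */
    }
    return handled;
}

int main(void)
{
    /* PEBS bit plus two counter overflows -> handled == 3 */
    printf("handled = %d\n", handle_irq((1ULL << PEBS_BIT) | 0x3));
    return 0;
}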
arch/x86/kernel/cpu/perf_event_p4.c

@@ -936,7 +936,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}

-	return handled > 0;
+	return handled;
 }

 /*
arch/x86/oprofile/nmi_int.c

@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
 	int error;

 	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_oprofile);
+	if (error)
+		sysdev_class_unregister(&oprofile_sysclass);
+
 	return error;
 }

@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }

 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */

 static int __init p4_init(char **cpu_type)

@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;

+	using_nmi = 0;
+
 	if (!cpu_has_apic)
 		return -ENODEV;

@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)

 	mux_init(ops);

-	init_sysfs();
+	ret = init_sysfs();
+	if (ret)
+		return ret;
+
 	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
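The init_sysfs() rework above is the classic register/rollback pattern: bail out early on the first failure, and undo the first registration if the second one fails. A compact user-space illustration, where register_a()/register_b() are hypothetical stand-ins for sysdev_class_register()/sysdev_register():

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return -1; /* simulate failure */ }

static int init(void)
{
    int error;

    error = register_a();
    if (error)
        return error;

    error = register_b();
    if (error)
        unregister_a();  /* don't leave A half-registered */

    return error;
}

int main(void)
{
    printf("init() = %d\n", init());
    return 0;
}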
drivers/oprofile/buffer_sync.c

@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
 	.notifier_call = module_load_notify,
 };

-
-static void end_sync(void)
-{
-	end_cpu_work();
-	/* make sure we don't leak task structs */
-	process_task_mortuary();
-	process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
 	int err;

@@ -158,7 +148,7 @@ int sync_start(void)
 	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
 		return -ENOMEM;

-	start_cpu_work();
+	mutex_lock(&buffer_mutex);

 	err = task_handoff_register(&task_free_nb);
 	if (err)

@@ -173,7 +163,10 @@ int sync_start(void)
 	if (err)
 		goto out4;

+	start_cpu_work();
+
 out:
+	mutex_unlock(&buffer_mutex);
 	return err;
 out4:
 	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);

@@ -182,7 +175,6 @@ out3:
 out2:
 	task_handoff_unregister(&task_free_nb);
 out1:
-	end_sync();
 	free_cpumask_var(marked_cpus);
 	goto out;
 }

@@ -190,11 +182,20 @@ out1:

 void sync_stop(void)
 {
+	/* flush buffers */
+	mutex_lock(&buffer_mutex);
+	end_cpu_work();
 	unregister_module_notifier(&module_load_nb);
 	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
 	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
 	task_handoff_unregister(&task_free_nb);
-	end_sync();
+	mutex_unlock(&buffer_mutex);
+	flush_scheduled_work();
+
+	/* make sure we don't leak task structs */
+	process_task_mortuary();
+	process_task_mortuary();
+
 	free_cpumask_var(marked_cpus);
 }
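The buffer_sync.c reordering is about lifetime: the delayed work must not start before the notifiers it feeds are registered, and it must be stopped before they are torn down, with buffer_mutex covering both transitions. A sequential user-space sketch of that ordering, with the mutex modelled as a pthread mutex and every other function a stand-in:

/* build: cc -pthread -o sync-order sync-order.c */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

static int register_notifiers(void) { puts("notifiers registered"); return 0; }
static void unregister_notifiers(void) { puts("notifiers unregistered"); }
static void start_cpu_work(void) { puts("cpu work started"); }
static void end_cpu_work(void) { puts("cpu work cancelled"); }
static void flush_work(void) { puts("pending work flushed"); }

static int sync_start(void)
{
    int err;

    pthread_mutex_lock(&buffer_mutex);
    err = register_notifiers();
    if (!err)
        start_cpu_work();  /* only once every registration succeeded */
    pthread_mutex_unlock(&buffer_mutex);
    return err;
}

static void sync_stop(void)
{
    pthread_mutex_lock(&buffer_mutex);
    end_cpu_work();        /* stop the producer first */
    unregister_notifiers();
    pthread_mutex_unlock(&buffer_mutex);
    flush_work();          /* outside the mutex, as in the diff */
}

int main(void)
{
    if (!sync_start())
        sync_stop();
    return 0;
}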
drivers/oprofile/cpu_buffer.c

@@ -120,8 +120,6 @@ void end_cpu_work(void)

 		cancel_delayed_work(&b->work);
 	}
-
-	flush_scheduled_work();
 }

 /*
kernel/perf_event.c

@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
 	}
 }

+static inline int
+event_filter_match(struct perf_event *event)
+{
+	return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 delta;
+	/*
+	 * An event which could not be activated because of
+	 * filter mismatch still needs to have its timings
+	 * maintained, otherwise bogus information is return
+	 * via read() for time_enabled, time_running:
+	 */
+	if (event->state == PERF_EVENT_STATE_INACTIVE
+	    && !event_filter_match(event)) {
+		delta = ctx->time - event->tstamp_stopped;
+		event->tstamp_running += delta;
+		event->tstamp_stopped = ctx->time;
+	}
+
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;

@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
 		  struct perf_event_context *ctx)
 {
 	struct perf_event *event;
-
-	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+	int state = group_event->state;

 	event_sched_out(group_event, cpuctx, ctx);

@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
 		event_sched_out(event, cpuctx, ctx);

-	if (group_event->attr.exclusive)
+	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
 		cpuctx->exclusive = 0;
 }

@@ -5942,15 +5960,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;

-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {

 	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
 		perf_event_init_cpu(cpu);
 		break;

+	case CPU_UP_CANCELED:
 	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
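The event_filter_match() hunk fixes time accounting for events bound to a different CPU: an inactive event that merely failed the CPU filter still has its running time advanced, so time_enabled/time_running reported through read() stay consistent. A self-contained model of the arithmetic, where struct event is a simplified stand-in for struct perf_event:

#include <stdio.h>

typedef unsigned long long u64;

struct event {
    int cpu;              /* -1 == any cpu */
    u64 tstamp_running;
    u64 tstamp_stopped;
};

static int event_filter_match(const struct event *event, int this_cpu)
{
    return event->cpu == -1 || event->cpu == this_cpu;
}

static void sched_out_inactive(struct event *event, u64 ctx_time, int this_cpu)
{
    if (!event_filter_match(event, this_cpu)) {
        u64 delta = ctx_time - event->tstamp_stopped;

        event->tstamp_running += delta;  /* credit the elapsed time */
        event->tstamp_stopped = ctx_time;
    }
}

int main(void)
{
    struct event e = { .cpu = 3, .tstamp_running = 0, .tstamp_stopped = 100 };

    /* Scheduled out on CPU 0 at ctx->time == 150: filter mismatch,
     * so 50 time units are still accounted to the event. */
    sched_out_inactive(&e, 150, 0);
    printf("running=%llu stopped=%llu\n", e.tstamp_running, e.tstamp_stopped);
    return 0;
}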
kernel/trace/ftrace.c

@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
 	struct ftrace_profile *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	static DEFINE_MUTEX(mutex);
 	static struct trace_seq s;
 	unsigned long long avg;
 	unsigned long long stddev;
 #endif
+	mutex_lock(&ftrace_profile_lock);
+
+	/* we raced with function_profile_reset() */
+	if (unlikely(rec->counter == 0)) {
+		ret = -EBUSY;
+		goto out;
+	}

 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
 		do_div(stddev, (rec->counter - 1) * 1000);
 	}

-	mutex_lock(&mutex);
 	trace_seq_init(&s);
 	trace_print_graph_duration(rec->time, &s);
 	trace_seq_puts(&s, "    ");

@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
 	trace_seq_puts(&s, "    ");
 	trace_print_graph_duration(stddev, &s);
 	trace_print_seq(m, &s);
-	mutex_unlock(&mutex);
 #endif
 	seq_putc(m, '\n');
+out:
+	mutex_unlock(&ftrace_profile_lock);

-	return 0;
+	return ret;
 }

 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)

@@ -2409,7 +2416,7 @@ static const struct file_operations ftrace_filter_fops = {
 	.open = ftrace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_regex_lseek,
+	.llseek = no_llseek,
 	.release = ftrace_filter_release,
 };
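The function_stat_show() change replaces a local, graph-tracer-only mutex with ftrace_profile_lock so the show path serializes against function_profile_reset(), bailing out with -EBUSY when it finds a record that a concurrent reset already zeroed. A sketch of the resulting pattern, with user-space stand-ins throughout:

#include <errno.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;

struct profile_rec {
    unsigned long counter;
};

static int stat_show(struct profile_rec *rec)
{
    int ret = 0;

    pthread_mutex_lock(&profile_lock);
    if (rec->counter == 0) {  /* we raced with a reset */
        ret = -EBUSY;
        goto out;
    }
    printf("counter = %lu\n", rec->counter);
out:
    pthread_mutex_unlock(&profile_lock);
    return ret;
}

int main(void)
{
    struct profile_rec live = { .counter = 42 };
    struct profile_rec reset = { .counter = 0 };

    printf("live:  %d\n", stat_show(&live));
    printf("reset: %d\n", stat_show(&reset));
    return 0;
}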
kernel/trace/trace_event_perf.c

@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
 		    tp_event->class && tp_event->class->reg &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
+			if (ret)
+				module_put(tp_event->mod);
 			break;
 		}
 	}

@@ -147,6 +149,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	}
 }
 out:
+	module_put(tp_event->mod);
 	mutex_unlock(&event_mutex);
 }
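These two hunks balance the module reference taken in perf_trace_init(): the reference is dropped immediately when event init fails, and otherwise at destroy time. A toy refcount model of that invariant (try_module_get()/module_put() are stand-ins, not the kernel functions):

#include <stdio.h>

static int module_refcount;

static int try_module_get(void) { module_refcount++; return 1; }
static void module_put(void) { module_refcount--; }

static int event_init(int fail) { return fail ? -1 : 0; }

static int trace_init(int fail)
{
    int ret;

    if (!try_module_get())
        return -1;
    ret = event_init(fail);
    if (ret)
        module_put();  /* failure path: drop the reference right away */
    return ret;
}

static void trace_destroy(void)
{
    module_put();      /* success path: dropped at teardown */
}

int main(void)
{
    if (trace_init(0) == 0)
        trace_destroy();
    trace_init(1);
    printf("refcount = %d (expect 0)\n", module_refcount);
    return 0;
}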
kernel/trace/trace_kprobe.c

@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 				struct pt_regs *regs);

-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check the name is good for event/group/fields */
+static int is_good_name(const char *name)
 {
 	if (!isalpha(*name) && *name != '_')
 		return 0;

@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 	else
 		tp->rp.kp.pre_handler = kprobe_dispatcher;

-	if (!event || !check_event_name(event)) {
+	if (!event || !is_good_name(event)) {
 		ret = -EINVAL;
 		goto error;
 	}

@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 	if (!tp->call.name)
 		goto error;

-	if (!group || !check_event_name(group)) {
+	if (!group || !is_good_name(group)) {
 		ret = -EINVAL;
 		goto error;
 	}

@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
 	int i, ret = 0;
 	int is_return = 0, is_delete = 0;
 	char *symbol = NULL, *event = NULL, *group = NULL;
-	char *arg, *tmp;
+	char *arg;
 	unsigned long offset = 0;
 	void *addr = NULL;
 	char buf[MAX_EVENT_NAME_LEN];

@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
 	/* parse arguments */
 	ret = 0;
 	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+		/* Increment count for freeing args in error case */
+		tp->nr_args++;
+
 		/* Parse argument name */
 		arg = strchr(argv[i], '=');
-		if (arg)
+		if (arg) {
 			*arg++ = '\0';
-		else
-			arg = argv[i];
-
-		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+		} else {
+			arg = argv[i];
+			/* If argument name is omitted, set "argN" */
+			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+		}
+
 		if (!tp->args[i].name) {
-			pr_info("Failed to allocate argument%d name '%s'.\n",
-				i, argv[i]);
+			pr_info("Failed to allocate argument[%d] name.\n", i);
 			ret = -ENOMEM;
 			goto error;
 		}
-		tmp = strchr(tp->args[i].name, ':');
-		if (tmp)
-			*tmp = '_';	/* convert : to _ */
+
+		if (!is_good_name(tp->args[i].name)) {
+			pr_info("Invalid argument[%d] name: %s\n",
+				i, tp->args[i].name);
+			ret = -EINVAL;
+			goto error;
+		}

 		if (conflict_field_name(tp->args[i].name, tp->args, i)) {
-			pr_info("Argument%d name '%s' conflicts with "
+			pr_info("Argument[%d] name '%s' conflicts with "
 				"another field.\n", i, argv[i]);
 			ret = -EINVAL;
 			goto error;

@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
 		/* Parse fetch argument */
 		ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
 		if (ret) {
-			pr_info("Parse error at argument%d. (%d)\n", i, ret);
-			kfree(tp->args[i].name);
+			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 			goto error;
 		}
-
-		tp->nr_args++;
 	}

 	ret = register_trace_probe(tp);
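Only the first check of is_good_name() is visible in the hunk above. A plausible stand-alone completion is sketched below; the identifier-character loop is an assumption extrapolated from the first two lines, not copied from the commit, but it behaves like a C-identifier test, which is what the rename to "event/group/fields" implies.

#include <ctype.h>
#include <stdio.h>

static int is_good_name(const char *name)
{
    /* first character: letter or underscore, as in the hunk above */
    if (!isalpha((unsigned char)*name) && *name != '_')
        return 0;
    /* remaining characters: letters, digits or underscore (assumed) */
    while (*++name != '\0') {
        if (!isalpha((unsigned char)*name) &&
            !isdigit((unsigned char)*name) && *name != '_')
            return 0;
    }
    return 1;
}

int main(void)
{
    printf("%d %d %d\n",
           is_good_name("my_event"),   /* 1 */
           is_good_name("arg1"),       /* 1 */
           is_good_name("bad:name"));  /* 0: ':' is now rejected, not rewritten */
    return 0;
}

Note the behavior change in create_trace_probe(): a ':' in an argument name used to be silently rewritten to '_'; it now fails with -EINVAL.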
kernel/watchdog.c

@@ -121,7 +121,7 @@ static void __touch_watchdog(void)

 void touch_softlockup_watchdog(void)
 {
-	__get_cpu_var(watchdog_touch_ts) = 0;
+	__raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);

@@ -141,7 +141,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	__get_cpu_var(watchdog_nmi_touch) = true;
+	if (watchdog_enabled) {
+		unsigned cpu;
+
+		for_each_present_cpu(cpu) {
+			if (per_cpu(watchdog_nmi_touch, cpu) != true)
+				per_cpu(watchdog_nmi_touch, cpu) = true;
+		}
+	}
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);

@@ -422,6 +429,9 @@ static int watchdog_enable(int cpu)
 		wake_up_process(p);
 	}

+	/* if any cpu succeeds, watchdog is considered enabled for the system */
+	watchdog_enabled = 1;
+
 	return 0;
 }

@@ -444,9 +454,6 @@ static void watchdog_disable(int cpu)
 		per_cpu(softlockup_watchdog, cpu) = NULL;
 		kthread_stop(p);
 	}
-
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
 }

 static void watchdog_enable_all_cpus(void)
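touch_nmi_watchdog() now touches the flag on every present CPU rather than just the local one, and only when the watchdog is enabled at all. Modelled in user space with a fixed array standing in for the kernel's per_cpu() machinery:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool watchdog_nmi_touch[NR_CPUS];
static int watchdog_enabled = 1;

static void touch_nmi_watchdog(void)
{
    if (watchdog_enabled) {
        unsigned cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            /* skip the store if already set (mirrors the != true test) */
            if (!watchdog_nmi_touch[cpu])
                watchdog_nmi_touch[cpu] = true;
        }
    }
}

int main(void)
{
    unsigned cpu;

    touch_nmi_watchdog();
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%u: %d\n", cpu, watchdog_nmi_touch[cpu]);
    return 0;
}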
tools/perf/util/probe-event.c

@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
 		goto error;
 	}
 	tev->point.offset = pev->point.offset;
+	tev->point.retprobe = pev->point.retprobe;
 	tev->nargs = pev->nargs;
 	if (tev->nargs) {
 		tev->args = zalloc(sizeof(struct probe_trace_arg)
tools/perf/util/probe-finder.c

@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
 	char buf[32], *ptr;
 	int ret, nscopes;

+	if (!is_c_varname(pf->pvar->var)) {
+		/* Copy raw parameters */
+		pf->tvar->value = strdup(pf->pvar->var);
+		if (pf->tvar->value == NULL)
+			return -ENOMEM;
+		if (pf->pvar->type) {
+			pf->tvar->type = strdup(pf->pvar->type);
+			if (pf->tvar->type == NULL)
+				return -ENOMEM;
+		}
+		if (pf->pvar->name) {
+			pf->tvar->name = strdup(pf->pvar->name);
+			if (pf->tvar->name == NULL)
+				return -ENOMEM;
+		} else
+			pf->tvar->name = NULL;
+		return 0;
+	}
+
 	if (pf->pvar->name)
 		pf->tvar->name = strdup(pf->pvar->name);
 	else {

@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
 	if (pf->tvar->name == NULL)
 		return -ENOMEM;

-	if (!is_c_varname(pf->pvar->var)) {
-		/* Copy raw parameters */
-		pf->tvar->value = strdup(pf->pvar->var);
-		if (pf->tvar->value == NULL)
-			return -ENOMEM;
-		if (pf->pvar->type) {
-			pf->tvar->type = strdup(pf->pvar->type);
-			if (pf->tvar->type == NULL)
-				return -ENOMEM;
-		}
-		return 0;
-	}
-
 	pr_debug("Searching '%s' variable in context.\n",
 		 pf->pvar->var);
 	/* Search child die for local variables and parameters. */

@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
 		/* This function has no name. */
 		tev->point.offset = (unsigned long)pf->addr;

+	/* Return probe must be on the head of a subprogram */
+	if (pf->pev->point.retprobe) {
+		if (tev->point.offset != 0) {
+			pr_warning("Return probe must be on the head of"
+				   " a real function\n");
+			return -EINVAL;
+		}
+		tev->point.retprobe = true;
+	}
+
 	pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
 		 tev->point.offset);
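The convert_probe_point() addition enforces that a return probe sits at function entry (offset 0). A simplified stand-alone version of the check; the struct and error plumbing are stand-ins for the perf probe-finder types:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct probe_point {
    unsigned long offset;
    bool retprobe;
};

static int set_retprobe(struct probe_point *pt, bool want_retprobe)
{
    if (want_retprobe) {
        if (pt->offset != 0) {
            fprintf(stderr, "Return probe must be on the head of"
                            " a real function\n");
            return -EINVAL;
        }
        pt->retprobe = true;  /* offset 0: probe the function entry */
    }
    return 0;
}

int main(void)
{
    struct probe_point head = { .offset = 0 };
    struct probe_point mid = { .offset = 0x42 };

    printf("entry:  %d\n", set_retprobe(&head, true));  /* 0 */
    printf("offset: %d\n", set_retprobe(&mid, true));   /* -EINVAL */
    return 0;
}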