diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e7d95c9b1c3f..1457576991dd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -116,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,
 
 TRACE_EVENT(sched_task_load,
 
-	TP_PROTO(struct task_struct *p),
+	TP_PROTO(struct task_struct *p, int boost, int reason),
 
-	TP_ARGS(p),
+	TP_ARGS(p, boost, reason),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -127,6 +127,8 @@ TRACE_EVENT(sched_task_load,
 		__field(unsigned int,	sum_scaled	)
 		__field(unsigned int,	period		)
 		__field(unsigned int,	demand		)
+		__field(	int,	boost		)
+		__field(	int,	reason		)
 	),
 
 	TP_fast_assign(
@@ -136,11 +138,14 @@ TRACE_EVENT(sched_task_load,
 		__entry->sum_scaled	= p->se.avg.runnable_avg_sum_scaled;
 		__entry->period		= p->se.avg.runnable_avg_period;
 		__entry->demand		= p->ravg.demand;
+		__entry->boost		= boost;
+		__entry->reason		= reason;
 	),
 
-	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u",
+	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d",
 		__entry->pid, __entry->comm, __entry->sum,
-		__entry->sum_scaled, __entry->period, __entry->demand)
+		__entry->sum_scaled, __entry->period, __entry->demand,
+		__entry->boost, __entry->reason)
 );
 
 TRACE_EVENT(sched_cpu_load,
@@ -188,6 +193,23 @@ TRACE_EVENT(sched_cpu_load,
 		__entry->power_cost)
 );
 
+TRACE_EVENT(sched_set_boost,
+
+	TP_PROTO(int ref_count),
+
+	TP_ARGS(ref_count),
+
+	TP_STRUCT__entry(
+		__field(	int,	ref_count	)
+	),
+
+	TP_fast_assign(
+		__entry->ref_count = ref_count;
+	),
+
+	TP_printk("ref_count=%d", __entry->ref_count)
+);
+
 #endif	/* CONFIG_SCHED_HMP */
 
 #if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b34704ca721b..fad0a4745462 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1485,6 +1485,7 @@ int sched_set_boost(int enable)
 	if (!old_refcount && boost_refcount)
 		boost_kick_cpus();
 
+	trace_sched_set_boost(boost_refcount);
 	spin_unlock_irqrestore(&boost_lock, flags);
 
 	return ret;
@@ -1750,10 +1751,11 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
 	int cpu_cost, min_cost = INT_MAX;
 	u64 load, min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
 	int small_task = is_small_task(p);
+	int boost = sched_boost();
 
-	trace_sched_task_load(p);
+	trace_sched_task_load(p, boost, reason);
 
-	if (small_task && !sched_boost()) {
+	if (small_task && !boost) {
 		best_cpu = best_small_task_cpu(p);
 		goto done;
 	}
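
Usage sketch: once the patch is applied, sched_set_boost registers under the standard
sched tracepoint directory and can be enabled through tracefs like any other scheduler
event (assuming tracefs is mounted at /sys/kernel/debug/tracing; the mount point varies
by system):

	# enable the new tracepoint and watch boost refcount changes
	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_set_boost/enable
	cat /sys/kernel/debug/tracing/trace_pipe

Each sched_set_boost() call then emits a "sched_set_boost: ref_count=<n>" line per the
TP_printk format above, and sched_task_load entries additionally carry the boost state
and placement reason that select_best_cpu() saw at the time of the decision.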