mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-01 02:21:16 +00:00
sched: Extend ftrace event to record boost and reason code
Add a new ftrace event to record changes to boost setting. Also extend sched_task_load() ftrace event to record boost setting and reason code passed to select_best_cpu(). This will be useful for debug purpose. Change-Id: Idac72f86d954472abe9f88a8db184343b7730287 Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
This commit is contained in:
parent
232b0fe6f4
commit
1f12e6698c
2 changed files with 30 additions and 6 deletions
|
@@ -116,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,

 TRACE_EVENT(sched_task_load,

-	TP_PROTO(struct task_struct *p),
+	TP_PROTO(struct task_struct *p, int boost, int reason),

-	TP_ARGS(p),
+	TP_ARGS(p, boost, reason),

 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -127,6 +127,8 @@ TRACE_EVENT(sched_task_load,
 		__field(unsigned int,	sum_scaled		)
 		__field(unsigned int,	period			)
 		__field(unsigned int,	demand			)
+		__field(	int,	boost			)
+		__field(	int,	reason			)
 	),

 	TP_fast_assign(
@@ -136,11 +138,14 @@ TRACE_EVENT(sched_task_load,
 		__entry->sum_scaled	= p->se.avg.runnable_avg_sum_scaled;
 		__entry->period		= p->se.avg.runnable_avg_period;
 		__entry->demand		= p->ravg.demand;
+		__entry->boost		= boost;
+		__entry->reason		= reason;
 	),

-	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u",
+	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d",
 		__entry->pid, __entry->comm, __entry->sum,
-		__entry->sum_scaled, __entry->period, __entry->demand)
+		__entry->sum_scaled, __entry->period, __entry->demand,
+		__entry->boost, __entry->reason)
 );

 TRACE_EVENT(sched_cpu_load,
@@ -188,6 +193,23 @@ TRACE_EVENT(sched_cpu_load,
 		__entry->power_cost)
 );

+TRACE_EVENT(sched_set_boost,
+
+	TP_PROTO(int ref_count),
+
+	TP_ARGS(ref_count),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, ref_count			)
+	),
+
+	TP_fast_assign(
+		__entry->ref_count = ref_count;
+	),
+
+	TP_printk("ref_count=%d", __entry->ref_count)
+);
+
 #endif /* CONFIG_SCHED_HMP */

 #if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
|
@@ -1485,6 +1485,7 @@ int sched_set_boost(int enable)
 	if (!old_refcount && boost_refcount)
 		boost_kick_cpus();

+	trace_sched_set_boost(boost_refcount);
 	spin_unlock_irqrestore(&boost_lock, flags);

 	return ret;
@@ -1750,10 +1751,11 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
 	int cpu_cost, min_cost = INT_MAX;
 	u64 load, min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
 	int small_task = is_small_task(p);
+	int boost = sched_boost();

-	trace_sched_task_load(p);
+	trace_sched_task_load(p, boost, reason);

-	if (small_task && !sched_boost()) {
+	if (small_task && !boost) {
 		best_cpu = best_small_task_cpu(p);
 		goto done;
 	}
Loading…
Reference in a new issue