sched: window-stats: Enhance cpu busy time accounting

rq->curr/prev_runnable_sum counters represent cpu demand from various
tasks that have run on a cpu. Any task that runs on a cpu will have a
representation in rq->curr_runnable_sum: its partial_demand value is
included in rq->curr_runnable_sum. Since partial_demand is derived
from historical load samples for a task, rq->curr_runnable_sum can
represent inflated/unrealistic cpu usage. As an example, let's say a
task with a partial_demand of 10ms runs for only 1ms on a cpu. What
gets included in rq->curr_runnable_sum is 10ms (and not the actual
execution time of 1ms). This leads to cpu busy time being
over-reported, causing frequency to stay higher than necessary.

This patch fixes the cpu busy time accounting scheme to strictly
represent actual task execution time. It also provides for conditional
fixup of busy time upon migration and upon heavy-task wakeup.

CRs-Fixed: 691443
Change-Id: Ic4092627668053934049af4dfef65d9b6b901e6b
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Author:    Srivatsa Vaddagiri  2014-09-01 13:26:53 +05:30
Committer: Steve Muckle
Parent:    dababc266f
Commit:    2568673dd6
9 changed files with 828 additions and 523 deletions
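
To make the change concrete, below is a minimal user-space model of strict
window-based busy accounting, under the assumption of a 10ms window. The
function names (account_busy_time, window_rollover) and standalone types are
illustrative only and are not the kernel's; the actual implementation sits in
the suppressed file diff further down.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_NS 10000000ULL			/* assumed 10ms window */

struct cpu_stats {
	uint64_t window_start;
	uint64_t curr_runnable_sum;		/* busy time in the current window */
	uint64_t prev_runnable_sum;		/* busy time in the previous window */
};

/* Roll windows forward until 'now' falls inside the current window. */
static void window_rollover(struct cpu_stats *cs, uint64_t now)
{
	while (now >= cs->window_start + WINDOW_NS) {
		cs->prev_runnable_sum = cs->curr_runnable_sum;
		cs->curr_runnable_sum = 0;
		cs->window_start += WINDOW_NS;
	}
}

/* Account only time actually executed, split across window boundaries. */
static void account_busy_time(struct cpu_stats *cs, uint64_t start, uint64_t end)
{
	window_rollover(cs, start);

	while (start < end) {
		uint64_t win_end = cs->window_start + WINDOW_NS;
		uint64_t upto = end < win_end ? end : win_end;

		cs->curr_runnable_sum += upto - start;
		if (upto == win_end)
			window_rollover(cs, win_end);
		start = upto;
	}
}

int main(void)
{
	struct cpu_stats cs = { .window_start = 0 };

	/*
	 * A task with a large historical demand runs for only 1ms:
	 * exactly 1ms is accounted, not its inflated partial_demand.
	 */
	account_busy_time(&cs, 2000000ULL, 3000000ULL);
	printf("curr_runnable_sum = %llu ns\n",
	       (unsigned long long)cs.curr_runnable_sum);
	return 0;
}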


@ -22,7 +22,7 @@ config CPU_FREQ_GOV_COMMON
config SCHED_FREQ_INPUT
bool "Scheduler inputs to cpufreq governor"
depends on SMP && FAIR_GROUP_SCHED
depends on SCHED_HMP
help
This option enables support for scheduler based CPU utilization
calculations which may then be used by any cpufreq governor. The


@ -268,6 +268,8 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16
extern const char *sched_window_reset_reasons[];
enum task_event {
PUT_PREV_TASK = 0,
PICK_NEXT_TASK = 1,
@ -1001,7 +1003,7 @@ struct sched_avg {
* choices of y < 1-2^(-32)*1024.
*/
u32 runnable_avg_sum, runnable_avg_period;
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
u32 runnable_avg_sum_scaled;
#endif
u64 last_runnable_update;
@ -1065,12 +1067,18 @@ struct ravg {
* sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
* demand for tasks.
*
* 'flags' can have either or both of PREV_WINDOW_CONTRIB and
* CURR_WINDOW_CONTRIB set.
* 'curr_window' represents task's contribution to cpu busy time
* statistics (rq->curr_runnable_sum) in current window
*
* 'prev_window' represents task's contribution to cpu busy time
* statistics (rq->prev_runnable_sum) in previous window
*/
u64 mark_start;
u32 sum, demand, partial_demand, flags;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
#ifdef CONFIG_SCHED_FREQ_INPUT
u32 curr_window, prev_window;
#endif
};
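
The two new counters make a per-task migration fixup possible: because a
task's exact contribution to each cpu's sums is tracked, it can be moved when
the task moves. A hedged sketch of that idea follows, using plain C types
rather than the kernel's u32/u64; the function name is illustrative, and the
real fixup (gated by the new sysctl_sched_migration_fixup tunable) is in the
suppressed file diff.

#include <stdint.h>

/* Mirrors ravg.curr_window / ravg.prev_window for one task. */
struct task_windows {
	uint32_t curr_window;
	uint32_t prev_window;
};

/* Mirrors rq->curr_runnable_sum / rq->prev_runnable_sum for one cpu. */
struct cpu_sums {
	uint64_t curr_runnable_sum;
	uint64_t prev_runnable_sum;
};

/*
 * On migration, move the task's known window contributions from the
 * source cpu to the destination cpu so that neither cpu's busy time
 * is over- or under-stated.
 */
void fixup_busy_time_on_migration(struct task_windows *t,
				  struct cpu_sums *src, struct cpu_sums *dst)
{
	src->curr_runnable_sum -= t->curr_window;
	src->prev_runnable_sum -= t->prev_window;
	dst->curr_runnable_sum += t->curr_window;
	dst->prev_runnable_sum += t->prev_window;
}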
struct sched_entity {
@ -1153,7 +1161,7 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
struct ravg ravg;
#endif
#ifdef CONFIG_CGROUP_SCHED
@ -2131,7 +2139,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#endif
extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
#if defined(CONFIG_SCHED_HMP) || defined(CONFIG_SCHED_FREQ_INPUT)
#ifdef CONFIG_SCHED_HMP
extern void sched_exit(struct task_struct *p);
#else
static inline void sched_exit(struct task_struct *p) { }


@ -40,8 +40,10 @@ extern unsigned int sysctl_sched_wakeup_load_threshold;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_account_wait_time;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_freq_legacy_mode;
extern unsigned int sysctl_sched_gov_response_time;
extern unsigned int sysctl_sched_freq_account_wait_time;
extern unsigned int sysctl_sched_migration_fixup;
extern unsigned int sysctl_sched_heavy_task_pct;
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;


@ -72,7 +72,7 @@ TRACE_EVENT(sched_enq_deq_task,
__field(unsigned long, cpu_load )
__field(unsigned int, rt_nr_running )
__field(unsigned int, cpus_allowed )
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
__field(unsigned int, sum_scaled )
__field(unsigned int, period )
__field(unsigned int, demand )
@ -89,7 +89,7 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->cpu_load = task_rq(p)->cpu_load[0];
__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
__entry->cpus_allowed = cpus_allowed;
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
__entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
__entry->period = p->se.avg.runnable_avg_period;
__entry->demand = p->ravg.demand;
@ -97,7 +97,7 @@ TRACE_EVENT(sched_enq_deq_task,
),
TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
" sum_scaled=%u period=%u demand=%u"
#endif
, __entry->cpu,
@ -106,7 +106,7 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->prio, __entry->nr_running,
__entry->cpu_load, __entry->rt_nr_running,
__entry->cpus_allowed
#ifdef CONFIG_SCHED_FREQ_INPUT
#ifdef CONFIG_SCHED_HMP
, __entry->sum_scaled, __entry->period, __entry->demand
#endif
)
@ -214,10 +214,6 @@ TRACE_EVENT(sched_set_boost,
TP_printk("ref_count=%d", __entry->ref_count)
);
#endif /* CONFIG_SCHED_HMP */
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
TRACE_EVENT(sched_update_task_ravg,
TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
@ -230,8 +226,6 @@ TRACE_EVENT(sched_update_task_ravg,
__field( pid_t, pid )
__field( pid_t, cur_pid )
__field(unsigned int, cur_freq )
__field(unsigned int, cs )
__field(unsigned int, ps )
__field( u64, wallclock )
__field( u64, mark_start )
__field( u64, delta_m )
@ -240,9 +234,14 @@ TRACE_EVENT(sched_update_task_ravg,
__field( u64, irqtime )
__field(enum task_event, evt )
__field(unsigned int, demand )
__field(unsigned int, partial_demand )
__field(unsigned int, sum )
__field( int, cpu )
#ifdef CONFIG_SCHED_FREQ_INPUT
__field( u64, cs )
__field( u64, ps )
__field( u32, curr_window )
__field( u32, prev_window )
#endif
),
TP_fast_assign(
@ -253,43 +252,51 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->cpu = rq->cpu;
__entry->cur_pid = rq->curr->pid;
__entry->cur_freq = rq->cur_freq;
__entry->cs = rq->curr_runnable_sum;
__entry->ps = rq->prev_runnable_sum;
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->mark_start = p->ravg.mark_start;
__entry->delta_m = (wallclock - p->ravg.mark_start);
__entry->demand = p->ravg.demand;
__entry->partial_demand = p->ravg.partial_demand;
__entry->sum = p->ravg.sum;
__entry->irqtime = irqtime;
#ifdef CONFIG_SCHED_FREQ_INPUT
__entry->cs = rq->curr_runnable_sum;
__entry->ps = rq->prev_runnable_sum;
__entry->curr_window = p->ravg.curr_window;
__entry->prev_window = p->ravg.prev_window;
#endif
),
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cs %u ps %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u partial_demand %u sum %u irqtime %llu",
__entry->wallclock, __entry->win_start, __entry->delta,
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
#ifdef CONFIG_SCHED_FREQ_INPUT
" cs %llu ps %llu cur_window %u prev_window %u"
#endif
, __entry->wallclock, __entry->win_start, __entry->delta,
task_event_names[__entry->evt], __entry->cpu,
__entry->cur_freq, __entry->cs, __entry->ps, __entry->cur_pid,
__entry->cur_freq, __entry->cur_pid,
__entry->pid, __entry->comm, __entry->mark_start,
__entry->delta_m, __entry->demand, __entry->partial_demand,
__entry->sum, __entry->irqtime)
__entry->delta_m, __entry->demand,
__entry->sum, __entry->irqtime
#ifdef CONFIG_SCHED_FREQ_INPUT
, __entry->cs, __entry->ps, __entry->curr_window,
__entry->prev_window
#endif
)
);
TRACE_EVENT(sched_update_history,
TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
int update_sum, int new_window, enum task_event evt),
enum task_event evt),
TP_ARGS(rq, p, runtime, samples, update_sum, new_window, evt),
TP_ARGS(rq, p, runtime, samples, evt),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field(unsigned int, runtime )
__field( int, samples )
__field( int, update_sum )
__field( int, new_window )
__field(enum task_event, evt )
__field(unsigned int, partial_demand )
__field(unsigned int, demand )
__array( u32, hist, RAVG_HIST_SIZE_MAX)
__field(unsigned int, nr_big_tasks )
@ -302,30 +309,60 @@ TRACE_EVENT(sched_update_history,
__entry->pid = p->pid;
__entry->runtime = runtime;
__entry->samples = samples;
__entry->update_sum = update_sum;
__entry->new_window = new_window;
__entry->evt = evt;
__entry->partial_demand = p->ravg.partial_demand;
__entry->demand = p->ravg.demand;
memcpy(__entry->hist, p->ravg.sum_history,
RAVG_HIST_SIZE_MAX * sizeof(u32));
#ifdef CONFIG_SCHED_HMP
__entry->nr_big_tasks = rq->nr_big_tasks;
__entry->nr_small_tasks = rq->nr_small_tasks;
#endif
__entry->cpu = rq->cpu;
),
TP_printk("%d (%s): runtime %u samples %d us %d nw %d event %s partial_demand %u demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u nr_small %u",
TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d nr_big %u nr_small %u",
__entry->pid, __entry->comm,
__entry->runtime, __entry->samples, __entry->update_sum,
__entry->new_window, task_event_names[__entry->evt],
__entry->partial_demand, __entry->demand, __entry->hist[0],
__entry->runtime, __entry->samples,
task_event_names[__entry->evt],
__entry->demand, __entry->hist[0],
__entry->hist[1], __entry->hist[2], __entry->hist[3],
__entry->hist[4], __entry->cpu, __entry->nr_big_tasks,
__entry->nr_small_tasks)
);
TRACE_EVENT(sched_reset_all_window_stats,
TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
int reason, unsigned int old_val, unsigned int new_val),
TP_ARGS(window_start, window_size, time_taken,
reason, old_val, new_val),
TP_STRUCT__entry(
__field( u64, window_start )
__field( u64, window_size )
__field( u64, time_taken )
__field( int, reason )
__field(unsigned int, old_val )
__field(unsigned int, new_val )
),
TP_fast_assign(
__entry->window_start = window_start;
__entry->window_size = window_size;
__entry->time_taken = time_taken;
__entry->reason = reason;
__entry->old_val = old_val;
__entry->new_val = new_val;
),
TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
__entry->time_taken, __entry->window_start,
__entry->window_size,
sched_window_reset_reasons[__entry->reason],
__entry->old_val, __entry->new_val)
);
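
The fields of this new tracepoint suggest the shape of the reset path: when a
window-statistics tunable changes, per-cpu and per-task window state is
presumably cleared and the event records the elapsed time, the reason, and the
old/new tunable values. A loose model follows, reusing the cpu_sums and
task_windows types from the earlier sketch; the function name and the exact
sweep are assumptions, the real routine being in the suppressed file diff.

/* Assumed model of a full window-statistics reset. */
void reset_all_window_stats_model(struct cpu_sums *cpus, int ncpu,
				  struct task_windows *tasks, int ntask)
{
	int i;

	for (i = 0; i < ncpu; i++) {
		cpus[i].curr_runnable_sum = 0;
		cpus[i].prev_runnable_sum = 0;
	}
	for (i = 0; i < ntask; i++) {
		tasks[i].curr_window = 0;
		tasks[i].prev_window = 0;
	}
	/*
	 * The kernel would emit trace_sched_reset_all_window_stats() here
	 * with window_start, window_size, time_taken, reason and the
	 * old/new tunable values shown above.
	 */
}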
#ifdef CONFIG_SCHED_FREQ_INPUT
TRACE_EVENT(sched_migration_update_sum,
TP_PROTO(struct rq *rq, struct task_struct *p),
@ -334,9 +371,9 @@ TRACE_EVENT(sched_migration_update_sum,
TP_STRUCT__entry(
__field(int, cpu )
__field(int, cs )
__field(int, ps )
__field(int, pid )
__field( u64, cs )
__field( u64, ps )
),
TP_fast_assign(
@ -346,12 +383,10 @@ TRACE_EVENT(sched_migration_update_sum,
__entry->pid = p->pid;
),
TP_printk("cpu %d: cs %u ps %u pid %d", __entry->cpu,
TP_printk("cpu %d: cs %llu ps %llu pid %d", __entry->cpu,
__entry->cs, __entry->ps, __entry->pid)
);
#ifdef CONFIG_SCHED_FREQ_INPUT
TRACE_EVENT(sched_get_busy,
TP_PROTO(int cpu, u64 load),
@ -396,7 +431,7 @@ TRACE_EVENT(sched_freq_alert,
#endif /* CONFIG_SCHED_FREQ_INPUT */
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_HMP */
/*
* Tracepoint for waking up a task:

File diff suppressed because it is too large.


@ -305,7 +305,7 @@ do { \
#ifdef CONFIG_SMP
P(cpu_power);
#endif
#if defined(CONFIG_SCHED_HMP) || defined(CONFIG_SCHED_FREQ_INPUT)
#ifdef CONFIG_SCHED_HMP
P(load_scale_factor);
P(capacity);
P(max_possible_capacity);
@ -580,7 +580,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
__P(load_avg);
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
P(ravg.demand);
P(se.avg.runnable_avg_sum_scaled);
#endif


@ -1222,7 +1222,7 @@ static u32 __compute_runnable_contrib(u64 n)
static void add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta);
static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods);
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
/* Initial task load. Newly created tasks are assigned this load. */
unsigned int __read_mostly sched_init_task_load_pelt;
@ -1245,10 +1245,6 @@ unsigned int max_task_load(void)
return sched_ravg_window;
}
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_HMP
/* Use this knob to turn on or off HMP-aware task placement logic */
unsigned int __read_mostly sched_enable_hmp = 0;
@ -1306,6 +1302,18 @@ unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
unsigned int __read_mostly sched_small_task;
unsigned int __read_mostly sysctl_sched_small_task_pct = 10;
/*
* Tasks with demand >= sched_heavy_task will have their
* window-based demand added to the previous window's CPU
* time when they wake up, if they have slept for at least
* one full window. This feature is disabled when the tunable
* is set to 0 (the default).
*/
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int __read_mostly sysctl_sched_heavy_task_pct;
unsigned int __read_mostly sched_heavy_task;
#endif
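
Below is a sketch of the wakeup fixup that the comment above describes,
reusing the cpu_sums type from the earlier sketch. sched_heavy_task is the
absolute threshold derived from sysctl_sched_heavy_task_pct by
set_hmp_defaults(); the function name and exact conditions are assumptions,
the real code being part of the suppressed file diff.

extern uint32_t sched_heavy_task;	/* 0 (the default) disables the feature */

void heavy_task_wakeup_fixup(struct cpu_sums *cs, uint32_t task_demand,
			     uint64_t slept_ns, uint64_t window_ns)
{
	if (!sched_heavy_task || task_demand < sched_heavy_task)
		return;

	/*
	 * Only a task that slept through at least one full window gets
	 * its demand credited to the previous window's busy time, since
	 * it could not have contributed anything there itself.
	 */
	if (slept_ns < window_ns)
		return;

	cs->prev_runnable_sum += task_demand;
}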
/*
* Tasks whose bandwidth consumption on a cpu is more than
* sched_upmigrate are considered "big" tasks. Big tasks will be
@ -1364,6 +1372,11 @@ void set_hmp_defaults(void)
sched_downmigrate =
pct_to_real(sysctl_sched_downmigrate_pct);
#ifdef CONFIG_SCHED_FREQ_INPUT
sched_heavy_task =
pct_to_real(sysctl_sched_heavy_task_pct);
#endif
sched_init_task_load_pelt =
div64_u64((u64)sysctl_sched_init_task_load_pct *
(u64)LOAD_AVG_MAX, 100);
@ -2230,15 +2243,14 @@ static inline int capacity(struct rq *rq)
#endif /* CONFIG_SCHED_HMP */
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
void init_new_task_load(struct task_struct *p)
{
int i;
memset(&p->ravg, 0, sizeof(struct ravg));
p->se.avg.decay_count = 0;
p->ravg.sum = 0;
p->ravg.flags = 0;
for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
p->ravg.sum_history[i] = sched_init_task_load_windows;
@ -2247,10 +2259,9 @@ void init_new_task_load(struct task_struct *p)
p->se.avg.runnable_avg_sum = sched_init_task_load_pelt;
p->se.avg.runnable_avg_sum_scaled = sched_init_task_load_pelt;
p->ravg.demand = sched_init_task_load_windows;
p->ravg.partial_demand = sched_init_task_load_windows;
}
#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#else /* CONFIG_SCHED_HMP */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
@ -2269,7 +2280,7 @@ void init_new_task_load(struct task_struct *p)
#endif /* CONFIG_SMP && CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_HMP */
/*
* We can represent the historical contribution to runnable average as the
@ -2690,7 +2701,7 @@ static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
int force_update) {}
#endif
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
/* Return task demand in percentage scale */
unsigned int pct_task_load(struct task_struct *p)
@ -2740,7 +2751,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
periods);
}
#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#else /* CONFIG_SCHED_HMP */
static inline void
add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
@ -2751,7 +2762,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
{
}
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_HMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{


@ -483,15 +483,13 @@ struct rq {
int cstate, wakeup_latency, wakeup_energy;
#endif
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
/*
* max_freq = user or thermal defined maximum
* max_possible_freq = maximum supported by hardware
*/
unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
struct cpumask freq_domain_cpumask;
unsigned int freq_requested;
u64 freq_requested_ts;
u64 cumulative_runnable_avg;
int efficiency; /* Differentiate cpus with different IPC capability */
@ -500,6 +498,13 @@ struct rq {
int max_possible_capacity;
u64 window_start;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int freq_requested;
u64 freq_requested_ts;
#endif
#endif
#ifdef CONFIG_SCHED_FREQ_INPUT
u64 curr_runnable_sum;
u64 prev_runnable_sum;
#endif
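
For completeness, here is one plausible way a governor-facing query (compare
the sched_get_busy tracepoint in the trace header above) could turn these
per-cpu sums into a load figure, again reusing the cpu_sums type from the
sketches above; this is purely illustrative and not necessarily what the
kernel does.

/*
 * The previous window is the completed, stable sample; the current
 * window is still accumulating and would under-report busy time.
 */
uint64_t cpu_busy_time(struct cpu_sums *cs)
{
	return cs->prev_runnable_sum;
}

unsigned int cpu_busy_pct(struct cpu_sums *cs, uint64_t window_ns)
{
	return (unsigned int)(cpu_busy_time(cs) * 100ULL / window_ns);
}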
@ -688,7 +693,7 @@ extern int group_balance_cpu(struct sched_group *sg);
extern void init_new_task_load(struct task_struct *p);
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SCHED_HMP
#define WINDOW_STATS_RECENT 0
#define WINDOW_STATS_MAX 1
@ -716,8 +721,9 @@ extern unsigned int sched_upmigrate;
extern unsigned int sched_downmigrate;
extern unsigned int sched_init_task_load_pelt;
extern unsigned int sched_init_task_load_windows;
extern void fixup_nr_big_small_task(int cpu);
extern unsigned int sched_heavy_task;
extern void fixup_nr_big_small_task(int cpu);
u64 scale_load_to_cpu(u64 load, int cpu);
unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
@ -744,7 +750,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#else /* CONFIG_SCHED_HMP */
static inline int pct_task_load(struct task_struct *p) { return 0; }
@ -773,7 +779,7 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
{
}
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_FREQ_INPUT
extern void check_for_freq_change(struct rq *rq);
@ -789,15 +795,9 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}
#ifdef CONFIG_SCHED_HMP
#define init_task_load sysctl_sched_init_task_load_pct
#else
#define init_task_load 0
#endif
#else /* CONFIG_SCHED_FREQ_INPUT */
#define init_task_load 0
#define sched_migration_fixup 0
static inline void check_for_freq_change(struct rq *rq) { }


@ -294,6 +294,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sched_wakeup_load_threshold",
.data = &sysctl_sched_wakeup_load_threshold,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_SCHED_FREQ_INPUT
{
.procname = "sched_freq_inc_notify_slack_pct",
@ -309,15 +316,36 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
{
.procname = "sched_freq_legacy_mode",
.data = &sysctl_sched_freq_legacy_mode,
.procname = "sched_migration_fixup",
.data = &sysctl_sched_migration_fixup,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_window_update_handler,
},
{
.procname = "sched_freq_account_wait_time",
.data = &sysctl_sched_freq_account_wait_time,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_window_update_handler,
},
{
.procname = "sched_heavy_task",
.data = &sysctl_sched_heavy_task_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
},
{
.procname = "sched_gov_response_time",
.data = &sysctl_sched_gov_response_time,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_account_wait_time",
.data = &sysctl_sched_account_wait_time,
@ -339,22 +367,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_window_update_handler,
},
{
.procname = "sched_gov_response_time",
.data = &sysctl_sched_gov_response_time,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sched_wakeup_load_threshold",
.data = &sysctl_sched_wakeup_load_threshold,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_small_task",
.data = &sysctl_sched_small_task_pct,