Revert "sched: Use only partial wait time as task demand"

This reverts commit 14fd2e5918 ("sched: Use only partial wait time as
task demand") as it causes a performance regression.

Change-Id: Iaddfce9c98bff328f50d746c9a86a0c8c34aa0b9
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
[pkondeti@codeaurora.org: Resolved minor conflict]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 9c61dea3eb
commit fffdb903c9
@@ -1350,17 +1350,8 @@ Appears at: /proc/sys/kernel/sched_account_wait_time
 
 Default value: 1
 
-This controls whether a tasks wait time is accounted as its demand for cpu
+This controls whether a task's wait time is accounted as its demand for cpu
 and thus the values found in its sum, sum_history[] and demand attributes.
-The load tracking algorithm only considers part of a tasks wait time as its
-demand. The portion of wait time accounted as demand is determined by each
-tasks percent load, i.e. a task that waits for 10ms and has 60 % task load,
-only 6 ms of the wait will contribute to task demand. This approach is fair
-as the scheduler tries to determine how much of its wait time would a task
-actually have been using the CPU if it had been executing. It ensures that
-tasks with high demand continue to see most of the benefits of accounting
-wait time as busy time, however, lower demand tasks don't experience a
-disproportionately high boost to demand.
 
 *** 7.16 sched_freq_account_wait_time
 
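The 10 ms / 60 % example in the removed paragraph is exactly the scaling
performed by the wait_adjust() helper deleted below. A minimal user-space
sketch of that arithmetic, assuming a 0..100 load scale for illustration
(the kernel scales task_load() against max_task_load() and performs the
division through div64_u64(), since 32-bit builds cannot use a plain
64-bit '/'):

#include <stdint.h>
#include <stdio.h>

static uint64_t max_task_load(void) { return 100; }	/* assumed scale */

/* The scaling the reverted patch applied to wait time; cf. the
 * removed wait_adjust() further down. */
static uint64_t partial_wait(uint64_t delta_ns, uint64_t load)
{
	return (delta_ns * load) / max_task_load();
}

int main(void)
{
	uint64_t wait_ns = 10000000;	/* task waited 10 ms */

	/* Reverted behaviour: at 60 % load only 6 ms counted as demand. */
	printf("partial: %llu ns\n",
	       (unsigned long long)partial_wait(wait_ns, 60));
	/* Restored behaviour: the full 10 ms counts whenever
	 * sched_account_wait_time = 1. */
	printf("full:    %llu ns\n", (unsigned long long)wait_ns);
	return 0;
}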
@@ -1693,23 +1693,19 @@ static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 
 static int account_busy_for_task_demand(struct task_struct *p, int event)
 {
+	/* No need to bother updating task demand for exiting tasks
+	 * or the idle task. */
 	if (exiting_task(p) || is_idle_task(p))
 		return 0;
 
-	/*
-	 * When a task is waking up it is completing a segment of non-busy
+	/* When a task is waking up it is completing a segment of non-busy
 	 * time. Likewise, if wait time is not treated as busy time, then
 	 * when a task begins to run or is migrated, it is not running and
-	 * is completing a segment of non-busy time.
-	 */
+	 * is completing a segment of non-busy time. */
 	if (event == TASK_WAKE || (!sched_account_wait_time &&
 			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
-	/*
-	 * We are left with TASK_UPDATE, IRQ_UPDATE, PUT_PREV_TASK and
-	 * wait time being accounted as busy time.
-	 */
 	return 1;
 }
 
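The restored comment above encodes a small decision table over
accounting events. A self-contained restatement that can be exercised in
user space; the event ids are stubbed locally as an assumption (in the
kernel they come from the window-based load tracking code):

#include <stdio.h>

/* Stand-ins for the kernel's accounting events (assumption). */
enum ev { TASK_WAKE, PICK_NEXT_TASK, TASK_MIGRATE, TASK_UPDATE,
	  IRQ_UPDATE, PUT_PREV_TASK };

static int sched_account_wait_time = 1;	/* mirrors the sysctl default */

/* Mirrors account_busy_for_task_demand() for a live, non-idle task. */
static int busy_for_demand(enum ev event)
{
	if (event == TASK_WAKE)
		return 0;	/* a wakeup ends a sleep (non-busy) segment */
	if (!sched_account_wait_time &&
	    (event == PICK_NEXT_TASK || event == TASK_MIGRATE))
		return 0;	/* wait time is not being treated as busy */
	return 1;		/* TASK_UPDATE, IRQ_UPDATE, PUT_PREV_TASK,
				 * or wait time accounted as busy time */
}

int main(void)
{
	printf("busy? %d\n", busy_for_demand(PICK_NEXT_TASK));	/* 1 */
	sched_account_wait_time = 0;
	printf("busy? %d\n", busy_for_demand(PICK_NEXT_TASK));	/* 0 */
	return 0;
}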
@@ -1787,15 +1783,6 @@ static void add_to_task_demand(struct rq *rq, struct task_struct *p,
 		p->ravg.sum = sched_ravg_window;
 }
 
-static u64 wait_adjust(struct task_struct *p, u64 delta, int event)
-{
-	/* We already know that wait time counts as busy time. */
-	if (event == PICK_NEXT_TASK || event == TASK_MIGRATE)
-		return div64_u64(delta * task_load(p), max_task_load());
-
-	return delta;
-}
-
 /*
  * Account cpu demand of task and/or update task's cpu demand history
 *
@@ -1870,8 +1857,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 	if (!new_window) {
 		/* The simple case - busy time contained within the existing
 		 * window. */
-		add_to_task_demand(rq, p, wait_adjust(p,
-					wallclock - mark_start, event));
+		add_to_task_demand(rq, p, wallclock - mark_start);
 		return;
 	}
 
@@ -1882,14 +1868,13 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 	window_start -= (u64)nr_full_windows * (u64)window_size;
 
 	/* Process (window_start - mark_start) first */
-	add_to_task_demand(rq, p,
-			wait_adjust(p, window_start - mark_start, event));
+	add_to_task_demand(rq, p, window_start - mark_start);
 
 	/* Push new sample(s) into task's demand history */
 	update_history(rq, p, p->ravg.sum, 1, event);
 	if (nr_full_windows)
-		update_history(rq, p, scale_exec_time(wait_adjust(p,
-			window_size, event), rq), nr_full_windows, event);
+		update_history(rq, p, scale_exec_time(window_size, rq),
+			       nr_full_windows, event);
 
 	/* Roll window_start back to current to process any remainder
 	 * in current window. */
@@ -1897,8 +1882,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 
 	/* Process (wallclock - window_start) next */
 	mark_start = window_start;
-	add_to_task_demand(rq, p,
-			wait_adjust(p, wallclock - mark_start, event));
+	add_to_task_demand(rq, p, wallclock - mark_start);
 }
 
 /* Reflect task activity on its demand and cpu's busy time statistics */
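The three add_to_task_demand() call sites restored above split one
elapsed busy segment across sample windows: the tail of the window
containing mark_start, any full windows in between, and the head of the
current window. A user-space sketch of that split, where WINDOW_SIZE and
the history callback are stand-ins (assumptions) for sched_ravg_window
and update_history():

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 10000000ULL	/* 10 ms, standing in for sched_ravg_window */

static uint64_t sum;	/* demand accumulated in the open window */

static void add_to_demand(uint64_t delta)
{
	sum += delta;
	if (sum > WINDOW_SIZE)	/* mirrors the clamp in add_to_task_demand() */
		sum = WINDOW_SIZE;
}

static void push_history(uint64_t sample, unsigned int nr)
{
	printf("record %u window(s) of %llu ns\n", nr,
	       (unsigned long long)sample);
	sum = 0;
}

/* window_start is the start of the window containing wallclock;
 * nr_full is the number of complete windows elapsed in between. */
static void split_demand(uint64_t mark_start, uint64_t window_start,
			 unsigned int nr_full, uint64_t wallclock)
{
	if (mark_start >= window_start) {
		/* Simple case: the segment fits in the current window. */
		add_to_demand(wallclock - mark_start);
		return;
	}

	/* Tail of the window that contained mark_start ... */
	add_to_demand(window_start - nr_full * WINDOW_SIZE - mark_start);
	push_history(sum, 1);
	/* ... any complete windows in between ... */
	if (nr_full)
		push_history(WINDOW_SIZE, nr_full);
	/* ... and the head of the current window. */
	add_to_demand(wallclock - window_start);
}

int main(void)
{
	/* Segment from 4 ms into window 0 until 3 ms into window 3:
	 * 6 ms tail, 2 full windows, 3 ms head. */
	split_demand(4000000, 3 * WINDOW_SIZE, 2, 3 * WINDOW_SIZE + 3000000);
	printf("open-window sum: %llu ns\n", (unsigned long long)sum);
	return 0;
}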
@@ -1373,7 +1373,7 @@ unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
 unsigned int __read_mostly sysctl_sched_min_runtime = 0; /* 0 ms */
 u64 __read_mostly sched_min_runtime = 0; /* 0 ms */
 
-unsigned int task_load(struct task_struct *p)
+static inline unsigned int task_load(struct task_struct *p)
 {
 	if (sched_use_pelt)
 		return p->se.avg.runnable_avg_sum_scaled;
@@ -883,7 +883,6 @@ extern unsigned int up_down_migrate_scale_factor;
 
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern void fixup_nr_big_small_task(int cpu, int reset_stats);
-unsigned int task_load(struct task_struct *p);
 unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				  u64 delta, u64 wallclock);
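With wait_adjust() gone, task_load() has no callers outside the file
that defines it, so the revert also restores its static inline linkage
and drops the extern declaration from the scheduler header.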