From fffdb903c91caf0166f5f45bc43025cfa2a25fde Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Fri, 26 Jun 2015 08:44:28 +0530
Subject: [PATCH] Revert "sched: Use only partial wait time as task demand"

This reverts commit 14fd2e5918f1 ("sched: Use only partial wait time as
task demand") as it causes performance regression.

Change-Id: Iaddfce9c98bff328f50d746c9a86a0c8c34aa0b9
Signed-off-by: Joonwoo Park
[pkondeti@codeaurora.org: Resolved minor conflict]
Signed-off-by: Pavankumar Kondeti
---
 Documentation/scheduler/sched-hmp.txt | 11 +----------
 kernel/sched/core.c                   | 36 ++++++++++--------------------------
 kernel/sched/fair.c                   |  2 +-
 kernel/sched/sched.h                  |  1 -
 4 files changed, 12 insertions(+), 38 deletions(-)

diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 66104969f952..1e96aba3fe7f 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -1350,17 +1350,8 @@ Appears at: /proc/sys/kernel/sched_account_wait_time
 
 Default value: 1
 
-This controls whether a tasks wait time is accounted as its demand for cpu
+This controls whether a task's wait time is accounted as its demand for cpu
 and thus the values found in its sum, sum_history[] and demand attributes.
-The load tracking algorithm only considers part of a tasks wait time as its
-demand. The portion of wait time accounted as demand is determined by each
-tasks percent load, i.e. a task that waits for 10ms and has 60 % task load,
-only 6 ms of the wait will contribute to task demand. This approach is fair
-as the scheduler tries to determine how much of its wait time would a task
-actually have been using the CPU if it had been executing. It ensures that
-tasks with high demand continue to see most of the benefits of accounting
-wait time as busy time, however, lower demand tasks don't experience a
-disproportionately high boost to demand.
 
 *** 7.16 sched_freq_account_wait_time
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f975e90ac7cf..2e908f8ed244 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1693,23 +1693,19 @@ static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 
 static int account_busy_for_task_demand(struct task_struct *p, int event)
 {
+	/* No need to bother updating task demand for exiting tasks
+	 * or the idle task. */
 	if (exiting_task(p) || is_idle_task(p))
 		return 0;
 
-	/*
-	 * When a task is waking up it is completing a segment of non-busy
+	/* When a task is waking up it is completing a segment of non-busy
 	 * time. Likewise, if wait time is not treated as busy time, then
 	 * when a task begins to run or is migrated, it is not running and
-	 * is completing a segment of non-busy time.
-	 */
+	 * is completing a segment of non-busy time. */
 	if (event == TASK_WAKE || (!sched_account_wait_time &&
-		 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
-	/*
-	 * We are left with TASK_UPDATE, IRQ_UPDATE, PUT_PREV_TASK and
-	 * wait time being accounted as busy time.
-	 */
 	return 1;
 }
 
@@ -1787,15 +1783,6 @@ static void add_to_task_demand(struct rq *rq, struct task_struct *p,
 		p->ravg.sum = sched_ravg_window;
 }
 
-static u64 wait_adjust(struct task_struct *p, u64 delta, int event)
-{
-	/* We already know that wait time counts as busy time. */
-	if (event == PICK_NEXT_TASK || event == TASK_MIGRATE)
-		return div64_u64(delta * task_load(p), max_task_load());
-
-	return delta;
-}
-
 /*
  * Account cpu demand of task and/or update task's cpu demand history
  *
@@ -1870,8 +1857,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 	if (!new_window) {
 		/* The simple case - busy time contained within the existing
 		 * window. */
-		add_to_task_demand(rq, p, wait_adjust(p,
-				   wallclock - mark_start, event));
+		add_to_task_demand(rq, p, wallclock - mark_start);
 		return;
 	}
 
@@ -1882,14 +1868,13 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 	window_start -= (u64)nr_full_windows * (u64)window_size;
 
 	/* Process (window_start - mark_start) first */
-	add_to_task_demand(rq, p,
-			   wait_adjust(p, window_start - mark_start, event));
+	add_to_task_demand(rq, p, window_start - mark_start);
 
 	/* Push new sample(s) into task's demand history */
 	update_history(rq, p, p->ravg.sum, 1, event);
 	if (nr_full_windows)
-		update_history(rq, p, scale_exec_time(wait_adjust(p,
-			window_size, event), rq), nr_full_windows, event);
+		update_history(rq, p, scale_exec_time(window_size, rq),
+			       nr_full_windows, event);
 
 	/* Roll window_start back to current to process any remainder
 	 * in current window. */
@@ -1897,8 +1882,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 
 	/* Process (wallclock - window_start) next */
 	mark_start = window_start;
-	add_to_task_demand(rq, p,
-			   wait_adjust(p, wallclock - mark_start, event));
+	add_to_task_demand(rq, p, wallclock - mark_start);
 }
 
 /* Reflect task activity on its demand and cpu's busy time statistics */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bca94b01c036..c41a208f6d8f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1373,7 +1373,7 @@ unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
 unsigned int __read_mostly sysctl_sched_min_runtime = 0; /* 0 ms */
 u64 __read_mostly sched_min_runtime = 0; /* 0 ms */
 
-unsigned int task_load(struct task_struct *p)
+static inline unsigned int task_load(struct task_struct *p)
 {
 	if (sched_use_pelt)
 		return p->se.avg.runnable_avg_sum_scaled;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0af5fff79dde..1576e9689363 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -883,7 +883,6 @@ extern unsigned int up_down_migrate_scale_factor;
 
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern void fixup_nr_big_small_task(int cpu, int reset_stats);
-unsigned int task_load(struct task_struct *p);
 unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
 				  u64 wallclock);
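
Note for reviewers unfamiliar with the behaviour this revert removes: wait_adjust()
charged only a load-proportional fraction of a task's wait time as demand. The
standalone userspace sketch below illustrates that arithmetic only; the names
demo_wait_adjust and DEMO_MAX_TASK_LOAD, the 1024 load scale and the 60 % figure
are illustrative assumptions, while the in-kernel task_load() and max_task_load()
return scheduler-internal units.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_TASK_LOAD 1024ULL	/* stand-in for max_task_load() */

/* Scale a wait-time delta by the task's load, as wait_adjust() did for
 * the PICK_NEXT_TASK and TASK_MIGRATE events before this revert. */
static uint64_t demo_wait_adjust(uint64_t delta_ns, uint64_t load)
{
	return (delta_ns * load) / DEMO_MAX_TASK_LOAD;
}

int main(void)
{
	uint64_t wait_ns = 10ULL * 1000 * 1000;			/* 10 ms wait */
	uint64_t load = (60 * DEMO_MAX_TASK_LOAD) / 100;	/* ~60 % load */

	/* Prints roughly 6000000 ns: only ~6 ms of the 10 ms wait is
	 * charged as demand, matching the example in the documentation
	 * text deleted above.  After this revert, the full 10 ms is
	 * accounted whenever sched_account_wait_time is enabled. */
	printf("accounted wait: %llu ns\n",
	       (unsigned long long)demo_wait_adjust(wait_ns, load));
	return 0;
}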