Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
cpufreq: ondemand: change freq sync code to use per-CPU kthreads
The migration notifier may run in a context where it is not safe to enqueue a work item. For example:

(__queue_work+0x1f4/0x40c)
(queue_work_on+0x34/0x44)
(dbs_migration_notify+0x2c/0x40)
(notifier_call_chain+0x38/0x68)
(__atomic_notifier_call_chain+0x34/0x44)
(atomic_notifier_call_chain+0x14/0x18)
(try_to_wake_up+0x350/0x36c)
(autoremove_wake_function+0xc/0x34)
(__wake_up_common+0x48/0x7c)
(__wake_up+0x3c/0x50)
(trustee_thread+0x1d0/0x528)
(kthread+0x80/0x8c)

The trustee code already takes a lock internal to the workqueue implementation and is not expecting a wake up to come around and enter the workqueue code again, where the same lock will be needed. Instead of relying on a workqueue, use a per-CPU kthread to do the frequency syncing.

Change-Id: I7555ee40867792fa8ec4ea8f9a6309323775e797
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Ajay Dudani <adudani@codeaurora.org>
This commit is contained in:
parent a554e38f94
commit 2513f53e24

1 changed file with 98 additions and 72 deletions
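For orientation before the diff, here is a minimal sketch of the pattern this commit moves to: each CPU gets a kthread parked on a waitqueue, and the notifier path (which may run in atomic context) only does an atomic_set() plus wake_up() instead of queue_work_on(), so no workqueue-internal locks are touched from that path. This is not the patch itself; the names sync_info, sync_kick(), sync_thread_fn() and sync_threads_init() are illustrative only and do not appear in the patch.

/*
 * Sketch of a per-CPU kthread + waitqueue replacing a workqueue item.
 * Names are illustrative; the real patch lives in the ondemand governor.
 */
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct sync_info {
        struct task_struct *thread;
        wait_queue_head_t wq;
        atomic_t src_cpu;               /* -1 means nothing pending */
};

static DEFINE_PER_CPU(struct sync_info, sync_info);

/*
 * Called from notifier/atomic context: just record the request and wake
 * the kthread. No work item is queued, so the workqueue code (and its
 * internal lock) is never entered here.
 */
static void sync_kick(int target_cpu, int src_cpu)
{
        struct sync_info *si = &per_cpu(sync_info, target_cpu);

        atomic_set(&si->src_cpu, src_cpu);
        wake_up(&si->wq);
}

static int sync_thread_fn(void *data)
{
        struct sync_info *si = &per_cpu(sync_info, (long)data);

        while (1) {
                wait_event(si->wq, atomic_read(&si->src_cpu) >= 0 ||
                                   kthread_should_stop());
                if (kthread_should_stop())
                        break;

                /* ... sync this CPU's frequency to atomic_read(&si->src_cpu) ... */

                atomic_set(&si->src_cpu, -1);   /* mark the request handled */
        }
        return 0;
}

static void __init sync_threads_init(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct sync_info *si = &per_cpu(sync_info, i);

                atomic_set(&si->src_cpu, -1);
                init_waitqueue_head(&si->wq);
                si->thread = kthread_run(sync_thread_fn, (void *)(long)i,
                                         "sync/%d", i);
        }
}

The point, as the commit message explains, is that kicking a dedicated kthread avoids re-entering the workqueue code and its internal lock from the notifier path shown in the backtrace.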
@@ -22,6 +22,7 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/ktime.h>
+#include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/input.h>
 #include <linux/workqueue.h>
@@ -103,6 +104,11 @@ struct cpu_dbs_info_s {
          * when user is changing the governor or limits.
          */
         struct mutex timer_mutex;
+
+        struct task_struct *sync_thread;
+        wait_queue_head_t sync_wq;
+        atomic_t src_sync_cpu;
+        atomic_t sync_enabled;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
@@ -120,14 +126,6 @@ static struct workqueue_struct *dbs_wq;
 
 static DEFINE_PER_CPU(struct work_struct, dbs_refresh_work);
 
-struct dbs_sync_work_struct {
-        struct work_struct work;
-        unsigned int src_cpu;
-        unsigned int targ_cpu;
-};
-
-static DEFINE_PER_CPU(struct dbs_sync_work_struct, dbs_sync_work);
-
 static struct dbs_tuners {
         unsigned int sampling_rate;
         unsigned int up_threshold;
@@ -1023,12 +1021,11 @@ bail_acq_sema_failed:
 static int dbs_migration_notify(struct notifier_block *nb,
                                 unsigned long target_cpu, void *arg)
 {
-        struct dbs_sync_work_struct *sync_work =
-                &per_cpu(dbs_sync_work, target_cpu);
-        sync_work->src_cpu = (unsigned int)arg;
+        struct cpu_dbs_info_s *target_dbs_info =
+                &per_cpu(od_cpu_dbs_info, target_cpu);
 
-        queue_work_on(target_cpu, dbs_wq,
-                &per_cpu(dbs_sync_work, target_cpu).work);
+        atomic_set(&target_dbs_info->src_sync_cpu, (int)arg);
+        wake_up(&target_dbs_info->sync_wq);
 
         return NOTIFY_OK;
 }
@@ -1037,73 +1034,91 @@ static struct notifier_block dbs_migration_nb = {
         .notifier_call = dbs_migration_notify,
 };
 
-void dbs_synchronize(struct work_struct *work)
+static int sync_pending(struct cpu_dbs_info_s *this_dbs_info)
 {
-        struct cpufreq_policy *policy;
-        struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
-        struct dbs_sync_work_struct *dbs_work;
-        unsigned int cpu, src_cpu;
+        return atomic_read(&this_dbs_info->src_sync_cpu) >= 0;
+}
+
+static int dbs_sync_thread(void *data)
+{
+        int src_cpu, cpu = (int)data;
         unsigned int src_freq, src_max_load;
+        struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
+        struct cpufreq_policy *policy;
         int delay;
 
-        dbs_work = container_of(work, struct dbs_sync_work_struct, work);
-        cpu = dbs_work->targ_cpu;
-        src_cpu = dbs_work->src_cpu;
-
-        get_online_cpus();
-
-        /* Getting source cpu info */
-        src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
-        if (src_dbs_info != NULL && src_dbs_info->cur_policy != NULL) {
-                src_freq = src_dbs_info->cur_policy->cur;
-                src_max_load = src_dbs_info->max_load;
-        } else {
-                src_freq = dbs_tuners_ins.sync_freq;
-                src_max_load = 0;
-        }
-
-        if (lock_policy_rwsem_write(cpu) < 0)
-                goto bail_acq_sema_failed;
-
         this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-        policy = this_dbs_info->cur_policy;
-        if (!policy) {
-                /* CPU not using ondemand governor */
-                goto bail_incorrect_governor;
-        }
-
-        delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+        while (1) {
+                wait_event(this_dbs_info->sync_wq,
+                           sync_pending(this_dbs_info) ||
+                           kthread_should_stop());
 
-        if (policy->cur < src_freq) {
+                if (kthread_should_stop())
+                        break;
+
-                /* Cancelling the next ondemand sample */
-                cancel_delayed_work_sync(&this_dbs_info->work);
+                get_online_cpus();
 
-                /*
-                 * Arch specific cpufreq driver may fail.
-                 * Don't update governor frequency upon failure.
-                 */
-                if (__cpufreq_driver_target(policy, src_freq,
-                                            CPUFREQ_RELATION_L) >= 0) {
-                        policy->cur = src_freq;
-                        if (src_max_load > this_dbs_info->max_load) {
-                                this_dbs_info->max_load = src_max_load;
-                                this_dbs_info->prev_load = src_max_load;
-                        }
+                if (!atomic_read(&this_dbs_info->sync_enabled)) {
+                        atomic_set(&this_dbs_info->src_sync_cpu, -1);
+                        put_online_cpus();
+                        continue;
                 }
 
-                /* Rescheduling the next ondemand sample */
-                mutex_lock(&this_dbs_info->timer_mutex);
-                schedule_delayed_work_on(cpu, &this_dbs_info->work,
-                                         delay);
-                mutex_unlock(&this_dbs_info->timer_mutex);
-        }
-bail_incorrect_governor:
-        unlock_policy_rwsem_write(cpu);
+                src_cpu = atomic_read(&this_dbs_info->src_sync_cpu);
+                src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
+                if (src_dbs_info != NULL &&
+                    src_dbs_info->cur_policy != NULL) {
+                        src_freq = src_dbs_info->cur_policy->cur;
+                        src_max_load = src_dbs_info->max_load;
+                } else {
+                        src_freq = dbs_tuners_ins.sync_freq;
+                        src_max_load = 0;
+                }
+
+                if (lock_policy_rwsem_write(cpu) < 0)
+                        goto bail_acq_sema_failed;
+
+                policy = this_dbs_info->cur_policy;
+                if (!policy) {
+                        /* CPU not using ondemand governor */
+                        goto bail_incorrect_governor;
+                }
+                delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+                if (policy->cur < src_freq) {
+                        /* cancel the next ondemand sample */
+                        cancel_delayed_work_sync(&this_dbs_info->work);
+
+                        /*
+                         * Arch specific cpufreq driver may fail.
+                         * Don't update governor frequency upon failure.
+                         */
+                        if (__cpufreq_driver_target(policy, src_freq,
+                                                    CPUFREQ_RELATION_L) >= 0) {
+                                policy->cur = src_freq;
+                                if (src_max_load > this_dbs_info->max_load) {
+                                        this_dbs_info->max_load = src_max_load;
+                                        this_dbs_info->prev_load = src_max_load;
+                                }
+                        }
+
+                        /* reschedule the next ondemand sample */
+                        mutex_lock(&this_dbs_info->timer_mutex);
+                        schedule_delayed_work_on(cpu, &this_dbs_info->work,
+                                                 delay);
+                        mutex_unlock(&this_dbs_info->timer_mutex);
+                }
 
+bail_incorrect_governor:
+                unlock_policy_rwsem_write(cpu);
 bail_acq_sema_failed:
-        put_online_cpus();
-        return;
+                put_online_cpus();
+                atomic_set(&this_dbs_info->src_sync_cpu, -1);
         }
+
+        return 0;
 }
 
 static void dbs_input_event(struct input_handle *handle, unsigned int type,
@@ -1199,6 +1214,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         if (dbs_tuners_ins.ignore_nice)
                                 j_dbs_info->prev_cpu_nice =
                                                 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+                        set_cpus_allowed(j_dbs_info->sync_thread,
+                                         *cpumask_of(j));
+                        atomic_set(&j_dbs_info->sync_enabled, 1);
                 }
                 this_dbs_info->cpu = cpu;
                 this_dbs_info->rate_mult = 1;
@@ -1256,6 +1274,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 mutex_lock(&dbs_mutex);
                 mutex_destroy(&this_dbs_info->timer_mutex);
                 dbs_enable--;
+
+                for_each_cpu(j, policy->cpus) {
+                        struct cpu_dbs_info_s *j_dbs_info;
+                        j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+                        atomic_set(&j_dbs_info->sync_enabled, 0);
+                }
+
                 /* If device is being removed, policy is no longer
                  * valid. */
                 this_dbs_info->cur_policy = NULL;
@@ -1325,16 +1350,16 @@ static int __init cpufreq_gov_dbs_init(void)
         for_each_possible_cpu(i) {
                 struct cpu_dbs_info_s *this_dbs_info =
                         &per_cpu(od_cpu_dbs_info, i);
-                struct dbs_sync_work_struct *dbs_sync =
-                        &per_cpu(dbs_sync_work, i);
 
                 mutex_init(&this_dbs_info->timer_mutex);
                 INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
 
-                INIT_WORK(&dbs_sync->work, dbs_synchronize);
-                dbs_sync->src_cpu = 0;
-                dbs_sync->targ_cpu = i;
+                atomic_set(&this_dbs_info->src_sync_cpu, -1);
+                init_waitqueue_head(&this_dbs_info->sync_wq);
+
+                this_dbs_info->sync_thread = kthread_run(dbs_sync_thread,
+                                                         (void *)i,
+                                                         "dbs_sync/%d", i);
         }
 
         return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -1348,6 +1373,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
                 struct cpu_dbs_info_s *this_dbs_info =
                         &per_cpu(od_cpu_dbs_info, i);
                 mutex_destroy(&this_dbs_info->timer_mutex);
+                kthread_stop(this_dbs_info->sync_thread);
         }
         destroy_workqueue(dbs_wq);
 }