ACPI / processor: Rework processor throttling with work_on_cpu()

commit f3ca4164529b875374c410193bbbac0ee960895f upstream.

acpi_processor_set_throttling() uses set_cpus_allowed_ptr() to make
sure that the (struct acpi_processor)->acpi_processor_set_throttling()
callback will run on the right CPU.  However, the function may be
called from a worker thread already bound to a different CPU in which
case that won't work.

Make acpi_processor_set_throttling() use work_on_cpu() as appropriate
instead of abusing set_cpus_allowed_ptr().
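
For context, work_on_cpu() (declared in <linux/workqueue.h>) runs a
long (*fn)(void *) callback on the requested CPU via the workqueue
infrastructure and waits for it to finish, which does not depend on the
caller's own CPU affinity.  Below is a minimal sketch of that pattern
under illustrative example_*() names; the patch itself introduces
struct acpi_processor_throttling_arg and acpi_processor_throttling_fn
for the same purpose:

    #include <linux/workqueue.h>	/* work_on_cpu() */

    /*
     * Illustrative only: arguments are packed into a struct so they fit
     * through work_on_cpu()'s single void * parameter.
     */
    struct example_cpu_arg {
    	int target_state;
    };

    /* Callback signature required by work_on_cpu(): long (*fn)(void *). */
    static long example_cpu_fn(void *data)
    {
    	struct example_cpu_arg *arg = data;

    	/*
    	 * This runs on the CPU handed to work_on_cpu(); the real patch
    	 * calls ->acpi_processor_set_throttling() at this point.
    	 */
    	return arg->target_state;
    }

    static long example_run_on_cpu(int cpu, int target_state)
    {
    	struct example_cpu_arg arg = { .target_state = target_state };

    	/*
    	 * Unlike set_cpus_allowed_ptr(current, cpumask_of(cpu)), this
    	 * works even from a worker thread already bound to another CPU.
    	 */
    	return work_on_cpu(cpu, example_cpu_fn, &arg);
    }

Because work_on_cpu() is synchronous, the argument struct can safely
live on the caller's stack, which is exactly how the patch passes its
acpi_processor_throttling_arg.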

Reported-and-tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
[rjw: Changelog]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author:    Lan Tianyu <tianyu.lan@intel.com>
Date:      2014-02-26 21:03:05 +08:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent:    3cb947fdcf
Commit:    44ae49aa88
1 changed file with 32 additions and 37 deletions


@@ -59,6 +59,12 @@ struct throttling_tstate {
 	int target_state; /* target T-state */
 };
 
+struct acpi_processor_throttling_arg {
+	struct acpi_processor *pr;
+	int target_state;
+	bool force;
+};
+
 #define THROTTLING_PRECHANGE (1)
 #define THROTTLING_POSTCHANGE (2)
@@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
+static long acpi_processor_throttling_fn(void *data)
+{
+	struct acpi_processor_throttling_arg *arg = data;
+	struct acpi_processor *pr = arg->pr;
+
+	return pr->throttling.acpi_processor_set_throttling(pr,
+			arg->target_state, arg->force);
+}
+
 int acpi_processor_set_throttling(struct acpi_processor *pr,
 				  int state, bool force)
 {
-	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
+	struct acpi_processor_throttling_arg arg;
 	struct throttling_tstate t_state;
-	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
-		free_cpumask_var(saved_mask);
-		return -ENOMEM;
-	}
-
 	if (cpu_is_offline(pr->id)) {
 		/*
 		 * the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 		return -ENODEV;
 	}
 
-	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpumask_and(online_throttling_cpus, cpu_online_mask,
-	    p_throttling->shared_cpu_map);
+
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		/* FIXME: use work_on_cpu() */
-		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-			/* Can't migrate to the pr->id CPU. Exit */
-			ret = -ENODEV;
-			goto exit;
-		}
-		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state, force);
+		arg.pr = pr;
+		arg.target_state = state;
+		arg.force = force;
+		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu(i, online_throttling_cpus) {
+		for_each_cpu_and(i, cpu_online_mask,
+		    p_throttling->shared_cpu_map) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 					"on CPU %d\n", i));
 				continue;
 			}
-			t_state.cpu = i;
-			/* FIXME: use work_on_cpu() */
-			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
-				continue;
-			ret = match_pr->throttling.
-				acpi_processor_set_throttling(
-				match_pr, t_state.target_state, force);
+
+			arg.pr = match_pr;
+			arg.target_state = state;
+			arg.force = force;
+			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+				&arg);
 		}
 	}
 	/*
@@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
-	/* restore the previous state */
-	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, saved_mask);
-exit:
-	free_cpumask_var(online_throttling_cpus);
-	free_cpumask_var(saved_mask);
+
 	return ret;
 }
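
A second, smaller change visible in the hunks above is that the
temporary online_throttling_cpus mask goes away: instead of allocating
a cpumask, AND-ing cpu_online_mask with shared_cpu_map and walking the
result with for_each_cpu(), the loops now use for_each_cpu_and(), which
iterates the intersection of two masks directly and therefore needs
neither the allocation nor the -ENOMEM error paths.  A rough
before/after sketch, with process_cpu() as an illustrative stand-in for
the per-CPU work done in the real loops:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    /* Stand-in for the per-CPU work done in the real loops. */
    static void process_cpu(unsigned int cpu)
    {
    	pr_debug("handling cpu %u\n", cpu);
    }

    /* Before: build the intersection in a temporary mask. */
    static int walk_with_temp_mask(const struct cpumask *shared_cpu_map)
    {
    	cpumask_var_t tmp;
    	unsigned int cpu;

    	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
    		return -ENOMEM;

    	cpumask_and(tmp, cpu_online_mask, shared_cpu_map);
    	for_each_cpu(cpu, tmp)
    		process_cpu(cpu);

    	free_cpumask_var(tmp);
    	return 0;
    }

    /* After: iterate the intersection directly; no allocation to fail. */
    static void walk_with_cpu_and(const struct cpumask *shared_cpu_map)
    {
    	unsigned int cpu;

    	for_each_cpu_and(cpu, cpu_online_mask, shared_cpu_map)
    		process_cpu(cpu);
    }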