mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
[PATCH] workqueue: fix schedule_on_each_cpu()
Fix the schedule_on_each_cpu() implementation: __queue_work() is now stricter, hence set the work-pending bit before passing in the new work. (Found in the -rt tree, using Peter Zijlstra's files-lock scalability patchset.) Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
5ccac88eeb
commit
9bfb18392e
1 changed file with 5 additions and 3 deletions
```diff
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func);
-		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-				per_cpu_ptr(works, cpu));
+		struct work_struct *work = per_cpu_ptr(works, cpu);
+
+		INIT_WORK(work, func);
+		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
 	mutex_unlock(&workqueue_mutex);
 	flush_workqueue(keventd_wq);
```
Loading…
Reference in a new issue