diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f4550a9dbf36..c3075416276a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -19,7 +19,9 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -122,11 +124,15 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(bool cpu_online)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index dc6017cd068e..be54ed07159d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2151,6 +2151,13 @@ static void schedule_cpuset_propagate_hotplug(struct cpuset *cs)
 		css_put(&cs->css);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2222,8 +2229,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	flush_workqueue(cpuset_propagate_hotplug_wq);
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(bool cpu_online)
@@ -2242,6 +2251,11 @@
 	schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cbeb7c4d102..8dd59eb0f3ca 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -18,7 +18,9 @@
 #include
 #include
 #include
-/*
+#include <linux/cpuset.h>
+
+/*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -241,6 +243,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		__thaw_task(p);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a8d603ab894f..2dec899502da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9975,17 +9975,16 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			break;
-		}
 
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED: