mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
[PATCH] Change synchronize_kernel to _rcu and _sched
This patch changes calls to synchronize_kernel(), deprecated in the earlier "Deprecate synchronize_kernel, GPL replacement" patch, to instead call the new synchronize_rcu() and synchronize_sched() APIs. Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
9b06e81898
commit
fbd568a3e6
16 changed files with 18 additions and 18 deletions
|
@@ -36,7 +36,7 @@ static void timer_stop(void)
 {
 	enable_timer_nmi_watchdog();
 	unset_nmi_callback();
-	synchronize_kernel();
+	synchronize_sched(); /* Allow already-started NMIs to complete. */
 }
@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
 			/* We now sleep until all other CPUs have scheduled. This ensures that
 			 * the deletion is seen by all other CPUs, and that the deleted handler
 			 * isn't still running on another CPU when we return. */
-			synchronize_kernel();
+			synchronize_rcu();
 		}
 	}
 	return rc;
@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
 
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
-	synchronize_kernel();
+	synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
 
 	pr->flags.power = 0;
 	result = acpi_processor_get_power_info(pr);
@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 		/* Wait until we know that we are out of any interrupt
 		   handlers might have been running before we freed the
 		   interrupt. */
-		synchronize_kernel();
+		synchronize_sched();
 
 		if (new_smi->si_sm) {
 			if (new_smi->handlers)
@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 	/* Wait until we know that we are out of any interrupt
 	   handlers might have been running before we freed the
 	   interrupt. */
-	synchronize_kernel();
+	synchronize_sched();
 
 	/* Wait for the timer to stop. This avoids problems with race
 	   conditions removing the timer here. */
@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio)
 	atkbd_disable(atkbd);
 
 	/* make sure we don't have a command in flight */
-	synchronize_kernel();
+	synchronize_sched(); /* Allow atkbd_interrupt()s to complete. */
 	flush_scheduled_work();
 
 	device_remove_file(&serio->dev, &atkbd_attr_extra);
@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
@@ -2385,7 +2385,7 @@ core_down:
 	}
 
 	/* Give a racing hard_start_xmit a few cycles to complete. */
-	synchronize_kernel();
+	synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
 
 	/*
 	 * And now for the 50k$ question: are IRQ disabled or not ?
@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler)
 	else
 		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
 	if (!ret)
-		synchronize_kernel();
+		synchronize_sched(); /* Allow interrupts to complete. */
 
 	sprintf (dbf_txt, "ret:%d", ret);
 	CIO_TRACE_EVENT (4, dbf_txt);
@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
 		ret = -EINVAL;
 	else {
 		adapter_handler = NULL;
-		synchronize_kernel();
+		synchronize_sched(); /* Allow interrupts to complete. */
 		ret = 0;
 	}
 	sprintf (dbf_txt, "ret:%d", ret);
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
 		/* Init routine failed: abort. Try to protect us from
 		   buggy refcounters. */
 		mod->state = MODULE_STATE_GOING;
-		synchronize_kernel();
+		synchronize_sched();
 		if (mod->unsafe)
 			printk(KERN_ERR "%s: module is now stuck!\n",
 			       mod->name);
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
 	WARN_ON(hook != timer_hook);
 	timer_hook = NULL;
 	/* make sure all CPUs see the NULL hook */
-	synchronize_kernel();
+	synchronize_sched(); /* Allow ongoing interrupts to complete. */
 }
 
 EXPORT_SYMBOL_GPL(register_timer_hook);
@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 	}
 
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		synchronize_kernel();
+		synchronize_rcu();
 
 	/* no cpu_online check required here since we clear the percpu
 	 * array on cpu offline and set this to NULL.
@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev)
 void synchronize_net(void)
 {
 	might_sleep();
-	synchronize_kernel();
+	synchronize_rcu();
 }
 
 /**
Loading…
Reference in a new issue