Merge branch 'syscore' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'syscore' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  Introduce ARCH_NO_SYSDEV_OPS config option (v2)
  cpufreq: Use syscore_ops for boot CPU suspend/resume (v2)
  KVM: Use syscore_ops instead of sysdev class and sysdev
  PCI / Intel IOMMU: Use syscore_ops instead of sysdev class and sysdev
  timekeeping: Use syscore_ops instead of sysdev class and sysdev
  x86: Use syscore_ops instead of sysdev classes and sysdevs
Linus Torvalds 2011-03-25 21:07:59 -07:00
commit 16c29dafcc
20 changed files with 211 additions and 356 deletions
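Every conversion in this series follows the same shape: the per-subsystem sysdev class, sysdev driver and sys_device boilerplate is dropped, the suspend/resume/shutdown callbacks lose their struct sys_device and pm_message_t arguments, and a single struct syscore_ops is registered instead. Syscore callbacks run on one CPU with interrupts disabled, which is why resume and shutdown no longer return an error code. A minimal sketch of the pattern follows; the foo_* names are placeholders for illustration, not code from any of the hunks below.

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* Placeholder state; the real drivers save their hardware registers here. */
static int foo_state;

static int foo_suspend(void)
{
	/* Runs on the boot CPU with interrupts off; may veto suspend. */
	foo_state = 42;
	return 0;
}

static void foo_resume(void)
{
	/* Mirrors foo_suspend(); resume cannot fail, hence the void return. */
	(void)foo_state;
}

static struct syscore_ops foo_syscore_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	/* a .shutdown callback is also available, cf. the mce and i8259 hunks */
};

static int __init foo_init(void)
{
	/* Replaces the old sysdev_class_register() + sysdev_register() pair. */
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
device_initcall(foo_init);

Compare this with, for example, the i8259 or timekeeping hunks below, where exactly this substitution is made.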

View File

@ -71,6 +71,7 @@ config X86
select GENERIC_IRQ_SHOW
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
select ARCH_NO_SYSDEV_OPS
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)

View File

@ -21,7 +21,7 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
@ -1260,7 +1260,7 @@ static void disable_iommus(void)
* disable suspend until real resume implemented
*/
static int amd_iommu_resume(struct sys_device *dev)
static void amd_iommu_resume(void)
{
struct amd_iommu *iommu;
@ -1276,11 +1276,9 @@ static int amd_iommu_resume(struct sys_device *dev)
*/
amd_iommu_flush_all_devices();
amd_iommu_flush_all_domains();
return 0;
}
static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
static int amd_iommu_suspend(void)
{
/* disable IOMMUs to go out of the way for BIOS */
disable_iommus();
@ -1288,17 +1286,11 @@ static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
return 0;
}
static struct sysdev_class amd_iommu_sysdev_class = {
.name = "amd_iommu",
static struct syscore_ops amd_iommu_syscore_ops = {
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
};
static struct sys_device device_amd_iommu = {
.id = 0,
.cls = &amd_iommu_sysdev_class,
};
/*
* This is the core init function for AMD IOMMU hardware in the system.
* This function is called from the generic x86 DMA layer initialization
@ -1415,14 +1407,6 @@ static int __init amd_iommu_init(void)
goto free;
}
ret = sysdev_class_register(&amd_iommu_sysdev_class);
if (ret)
goto free;
ret = sysdev_register(&device_amd_iommu);
if (ret)
goto free;
ret = amd_iommu_init_devices();
if (ret)
goto free;
@ -1441,6 +1425,8 @@ static int __init amd_iommu_init(void)
amd_iommu_init_notifier();
register_syscore_ops(&amd_iommu_syscore_ops);
if (iommu_pass_through)
goto out;

View File

@ -24,7 +24,7 @@
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/dmar.h>
@ -2046,7 +2046,7 @@ static struct {
unsigned int apic_thmr;
} apic_pm_state;
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
static int lapic_suspend(void)
{
unsigned long flags;
int maxlvt;
@ -2084,23 +2084,21 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
return 0;
}
static int lapic_resume(struct sys_device *dev)
static void lapic_resume(void)
{
unsigned int l, h;
unsigned long flags;
int maxlvt;
int ret = 0;
int maxlvt, ret;
struct IO_APIC_route_entry **ioapic_entries = NULL;
if (!apic_pm_state.active)
return 0;
return;
local_irq_save(flags);
if (intr_remapping_enabled) {
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
WARN(1, "Alloc ioapic_entries in lapic resume failed.");
ret = -ENOMEM;
goto restore;
}
@ -2162,8 +2160,6 @@ static int lapic_resume(struct sys_device *dev)
}
restore:
local_irq_restore(flags);
return ret;
}
/*
@ -2171,17 +2167,11 @@ restore:
* are needed on every CPU up until machine_halt/restart/poweroff.
*/
static struct sysdev_class lapic_sysclass = {
.name = "lapic",
static struct syscore_ops lapic_syscore_ops = {
.resume = lapic_resume,
.suspend = lapic_suspend,
};
static struct sys_device device_lapic = {
.id = 0,
.cls = &lapic_sysclass,
};
static void __cpuinit apic_pm_activate(void)
{
apic_pm_state.active = 1;
@ -2189,16 +2179,11 @@ static void __cpuinit apic_pm_activate(void)
static int __init init_lapic_sysfs(void)
{
int error;
if (!cpu_has_apic)
return 0;
/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
if (cpu_has_apic)
register_syscore_ops(&lapic_syscore_ops);
error = sysdev_class_register(&lapic_sysclass);
if (!error)
error = sysdev_register(&device_lapic);
return error;
return 0;
}
/* local apic needs to resume before other devices access its registers. */

View File

@ -30,7 +30,7 @@
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
@ -2918,89 +2918,84 @@ static int __init io_apic_bug_finalize(void)
late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
struct sys_device dev;
struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
static struct IO_APIC_route_entry *ioapic_saved_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
static void suspend_ioapic(int ioapic_id)
{
struct IO_APIC_route_entry *entry;
struct sysfs_ioapic_data *data;
struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id];
int i;
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
*entry = ioapic_read_entry(dev->id, i);
if (!saved_data)
return;
for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++)
saved_data[i] = ioapic_read_entry(ioapic_id, i);
}
static int ioapic_suspend(void)
{
int ioapic_id;
for (ioapic_id = 0; ioapic_id < nr_ioapics; ioapic_id++)
suspend_ioapic(ioapic_id);
return 0;
}
static int ioapic_resume(struct sys_device *dev)
static void resume_ioapic(int ioapic_id)
{
struct IO_APIC_route_entry *entry;
struct sysfs_ioapic_data *data;
struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id];
unsigned long flags;
union IO_APIC_reg_00 reg_00;
int i;
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
if (!saved_data)
return;
raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
reg_00.raw = io_apic_read(ioapic_id, 0);
if (reg_00.bits.ID != mp_ioapics[ioapic_id].apicid) {
reg_00.bits.ID = mp_ioapics[ioapic_id].apicid;
io_apic_write(ioapic_id, 0, reg_00.raw);
}
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
ioapic_write_entry(dev->id, i, entry[i]);
return 0;
for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++)
ioapic_write_entry(ioapic_id, i, saved_data[i]);
}
static struct sysdev_class ioapic_sysdev_class = {
.name = "ioapic",
static void ioapic_resume(void)
{
int ioapic_id;
for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--)
resume_ioapic(ioapic_id);
}
static struct syscore_ops ioapic_syscore_ops = {
.suspend = ioapic_suspend,
.resume = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
static int __init ioapic_init_ops(void)
{
struct sys_device * dev;
int i, size, error;
int i;
error = sysdev_class_register(&ioapic_sysdev_class);
if (error)
return error;
for (i = 0; i < nr_ioapics; i++) {
unsigned int size;
for (i = 0; i < nr_ioapics; i++ ) {
size = sizeof(struct sys_device) + nr_ioapic_registers[i]
size = nr_ioapic_registers[i]
* sizeof(struct IO_APIC_route_entry);
mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
if (!mp_ioapic_data[i]) {
printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
continue;
}
dev = &mp_ioapic_data[i]->dev;
dev->id = i;
dev->cls = &ioapic_sysdev_class;
error = sysdev_register(dev);
if (error) {
kfree(mp_ioapic_data[i]);
mp_ioapic_data[i] = NULL;
printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
continue;
}
ioapic_saved_data[i] = kzalloc(size, GFP_KERNEL);
if (!ioapic_saved_data[i])
pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
}
register_syscore_ops(&ioapic_syscore_ops);
return 0;
}
device_initcall(ioapic_init_sysfs);
device_initcall(ioapic_init_ops);
/*
* Dynamic irq allocate and deallocation

View File

@ -21,6 +21,7 @@
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
@ -1749,14 +1750,14 @@ static int mce_disable_error_reporting(void)
return 0;
}
static int mce_suspend(struct sys_device *dev, pm_message_t state)
static int mce_suspend(void)
{
return mce_disable_error_reporting();
}
static int mce_shutdown(struct sys_device *dev)
static void mce_shutdown(void)
{
return mce_disable_error_reporting();
mce_disable_error_reporting();
}
/*
@ -1764,14 +1765,18 @@ static int mce_shutdown(struct sys_device *dev)
* Only one CPU is active at this time, the others get re-added later using
* CPU hotplug:
*/
static int mce_resume(struct sys_device *dev)
static void mce_resume(void)
{
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
return 0;
}
static struct syscore_ops mce_syscore_ops = {
.suspend = mce_suspend,
.shutdown = mce_shutdown,
.resume = mce_resume,
};
static void mce_cpu_restart(void *data)
{
del_timer_sync(&__get_cpu_var(mce_timer));
@ -1808,9 +1813,6 @@ static void mce_enable_ce(void *all)
}
static struct sysdev_class mce_sysclass = {
.suspend = mce_suspend,
.shutdown = mce_shutdown,
.resume = mce_resume,
.name = "machinecheck",
};
@ -2139,6 +2141,7 @@ static __init int mcheck_init_device(void)
return err;
}
register_syscore_ops(&mce_syscore_ops);
register_hotcpu_notifier(&mce_cpu_notifier);
misc_register(&mce_log_device);

View File

@ -45,6 +45,7 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>
#include <asm/processor.h>
#include <asm/e820.h>
@ -630,7 +631,7 @@ struct mtrr_value {
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
static int mtrr_save(void)
{
int i;
@ -642,7 +643,7 @@ static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
return 0;
}
static int mtrr_restore(struct sys_device *sysdev)
static void mtrr_restore(void)
{
int i;
@ -653,12 +654,11 @@ static int mtrr_restore(struct sys_device *sysdev)
mtrr_value[i].ltype);
}
}
return 0;
}
static struct sysdev_driver mtrr_sysdev_driver = {
static struct syscore_ops mtrr_syscore_ops = {
.suspend = mtrr_save,
.resume = mtrr_restore,
};
@ -839,7 +839,7 @@ static int __init mtrr_init_finialize(void)
* TBD: is there any system with such CPU which supports
* suspend/resume? If no, we should remove the code.
*/
sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
register_syscore_ops(&mtrr_syscore_ops);
return 0;
}

View File

@ -10,7 +10,7 @@
*/
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <asm/dma.h>
@ -21,7 +21,7 @@
* in asm/dma.h.
*/
static int i8237A_resume(struct sys_device *dev)
static void i8237A_resume(void)
{
unsigned long flags;
int i;
@ -41,31 +41,15 @@ static int i8237A_resume(struct sys_device *dev)
enable_dma(4);
release_dma_lock(flags);
return 0;
}
static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
{
return 0;
}
static struct sysdev_class i8237_sysdev_class = {
.name = "i8237",
.suspend = i8237A_suspend,
static struct syscore_ops i8237_syscore_ops = {
.resume = i8237A_resume,
};
static struct sys_device device_i8237A = {
.id = 0,
.cls = &i8237_sysdev_class,
};
static int __init i8237A_init_sysfs(void)
static int __init i8237A_init_ops(void)
{
int error = sysdev_class_register(&i8237_sysdev_class);
if (!error)
error = sysdev_register(&device_i8237A);
return error;
register_syscore_ops(&i8237_syscore_ops);
return 0;
}
device_initcall(i8237A_init_sysfs);
device_initcall(i8237A_init_ops);

View File

@ -8,7 +8,7 @@
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
@ -245,20 +245,19 @@ static void save_ELCR(char *trigger)
trigger[1] = inb(0x4d1) & 0xDE;
}
static int i8259A_resume(struct sys_device *dev)
static void i8259A_resume(void)
{
init_8259A(i8259A_auto_eoi);
restore_ELCR(irq_trigger);
return 0;
}
static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
static int i8259A_suspend(void)
{
save_ELCR(irq_trigger);
return 0;
}
static int i8259A_shutdown(struct sys_device *dev)
static void i8259A_shutdown(void)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
@ -266,21 +265,14 @@ static int i8259A_shutdown(struct sys_device *dev)
*/
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
return 0;
}
static struct sysdev_class i8259_sysdev_class = {
.name = "i8259",
static struct syscore_ops i8259_syscore_ops = {
.suspend = i8259A_suspend,
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
static struct sys_device device_i8259A = {
.id = 0,
.cls = &i8259_sysdev_class,
};
static void mask_8259A(void)
{
unsigned long flags;
@ -399,17 +391,12 @@ struct legacy_pic default_legacy_pic = {
struct legacy_pic *legacy_pic = &default_legacy_pic;
static int __init i8259A_init_sysfs(void)
static int __init i8259A_init_ops(void)
{
int error;
if (legacy_pic == &default_legacy_pic)
register_syscore_ops(&i8259_syscore_ops);
if (legacy_pic != &default_legacy_pic)
return 0;
error = sysdev_class_register(&i8259_sysdev_class);
if (!error)
error = sysdev_register(&device_i8259A);
return error;
return 0;
}
device_initcall(i8259A_init_sysfs);
device_initcall(i8259A_init_ops);

View File

@ -82,6 +82,7 @@
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/syscore_ops.h>
#include <asm/microcode.h>
#include <asm/processor.h>
@ -438,33 +439,25 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
return 0;
}
static int mc_sysdev_resume(struct sys_device *dev)
{
int cpu = dev->id;
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (!cpu_online(cpu))
return 0;
/*
* All non-bootup cpus are still disabled,
* so only CPU 0 will apply ucode here.
*
* Moreover, there can be no concurrent
* updates from any other places at this point.
*/
WARN_ON(cpu != 0);
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
return 0;
}
static struct sysdev_driver mc_sysdev_driver = {
.add = mc_sysdev_add,
.remove = mc_sysdev_remove,
.resume = mc_sysdev_resume,
};
/**
* mc_bp_resume - Update boot CPU microcode during resume.
*/
static void mc_bp_resume(void)
{
int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
}
static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume,
};
static __cpuinit int
@ -542,6 +535,7 @@ static int __init microcode_init(void)
if (error)
return error;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
pr_info("Microcode Update Driver: v" MICROCODE_VERSION

View File

@ -27,7 +27,7 @@
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
@ -589,7 +589,7 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
aperture_alloc = aper_alloc;
}
static void gart_fixup_northbridges(struct sys_device *dev)
static void gart_fixup_northbridges(void)
{
int i;
@ -613,33 +613,20 @@ static void gart_fixup_northbridges(struct sys_device *dev)
}
}
static int gart_resume(struct sys_device *dev)
static void gart_resume(void)
{
pr_info("PCI-DMA: Resuming GART IOMMU\n");
gart_fixup_northbridges(dev);
gart_fixup_northbridges();
enable_gart_translations();
return 0;
}
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
return 0;
}
static struct sysdev_class gart_sysdev_class = {
.name = "gart",
.suspend = gart_suspend,
static struct syscore_ops gart_syscore_ops = {
.resume = gart_resume,
};
static struct sys_device device_gart = {
.cls = &gart_sysdev_class,
};
/*
* Private Northbridge GATT initialization in case we cannot use the
* AGP driver for some reason.
@ -650,7 +637,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
unsigned aper_base, new_aper_base;
struct pci_dev *dev;
void *gatt;
int i, error;
int i;
pr_info("PCI-DMA: Disabling AGP.\n");
@ -685,12 +672,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
agp_gatt_table = gatt;
error = sysdev_class_register(&gart_sysdev_class);
if (!error)
error = sysdev_register(&device_gart);
if (error)
panic("Could not register gart_sysdev -- "
"would corrupt data on next suspend");
register_syscore_ops(&gart_syscore_ops);
flush_gart();

View File

@ -15,7 +15,7 @@
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
@ -536,7 +536,7 @@ static void nmi_shutdown(void)
#ifdef CONFIG_PM
static int nmi_suspend(struct sys_device *dev, pm_message_t state)
static int nmi_suspend(void)
{
/* Only one CPU left, just stop that one */
if (nmi_enabled == 1)
@ -544,49 +544,31 @@ static int nmi_suspend(struct sys_device *dev, pm_message_t state)
return 0;
}
static int nmi_resume(struct sys_device *dev)
static void nmi_resume(void)
{
if (nmi_enabled == 1)
nmi_cpu_start(NULL);
return 0;
}
static struct sysdev_class oprofile_sysclass = {
.name = "oprofile",
static struct syscore_ops oprofile_syscore_ops = {
.resume = nmi_resume,
.suspend = nmi_suspend,
};
static struct sys_device device_oprofile = {
.id = 0,
.cls = &oprofile_sysclass,
};
static int __init init_sysfs(void)
static void __init init_suspend_resume(void)
{
int error;
error = sysdev_class_register(&oprofile_sysclass);
if (error)
return error;
error = sysdev_register(&device_oprofile);
if (error)
sysdev_class_unregister(&oprofile_sysclass);
return error;
register_syscore_ops(&oprofile_syscore_ops);
}
static void exit_sysfs(void)
static void exit_suspend_resume(void)
{
sysdev_unregister(&device_oprofile);
sysdev_class_unregister(&oprofile_sysclass);
unregister_syscore_ops(&oprofile_syscore_ops);
}
#else
static inline int init_sysfs(void) { return 0; }
static inline void exit_sysfs(void) { }
static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }
#endif /* CONFIG_PM */
@ -789,9 +771,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
mux_init(ops);
ret = init_sysfs();
if (ret)
return ret;
init_suspend_resume();
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
@ -799,5 +779,5 @@ int __init op_nmi_init(struct oprofile_operations *ops)
void op_nmi_exit(void)
{
exit_sysfs();
exit_suspend_resume();
}

View File

@ -168,4 +168,11 @@ config SYS_HYPERVISOR
bool
default n
config ARCH_NO_SYSDEV_OPS
bool
---help---
To be selected by architectures that don't use sysdev class or
sysdev driver power management (suspend/resume) and shutdown
operations.
endmenu

View File

@ -329,7 +329,7 @@ void sysdev_unregister(struct sys_device *sysdev)
}
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
/**
* sysdev_shutdown - Shut down all system devices.
*
@ -524,6 +524,7 @@ int sysdev_resume(void)
return 0;
}
EXPORT_SYMBOL_GPL(sysdev_resume);
#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */
int __init system_bus_init(void)
{

View File

@ -28,6 +28,7 @@
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
@ -1340,35 +1341,31 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
static struct sysdev_driver cpufreq_sysdev_driver = {
.add = cpufreq_add_dev,
.remove = cpufreq_remove_dev,
};
/**
* cpufreq_suspend - let the low level driver prepare for suspend
* cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
*
* This function is only executed for the boot processor. The other CPUs
* have been put offline by means of CPU hotplug.
*/
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
static int cpufreq_bp_suspend(void)
{
int ret = 0;
int cpu = sysdev->id;
int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy;
dprintk("suspending cpu %u\n", cpu);
if (!cpu_online(cpu))
return 0;
/* we may be lax here as interrupts are off. Nonetheless
* we need to grab the correct cpu policy, as to check
* whether we really run on this CPU.
*/
/* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
return -EINVAL;
/* only handle each CPU group once */
if (unlikely(cpu_policy->cpu != cpu))
goto out;
return 0;
if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy);
@ -1377,13 +1374,12 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
"step on CPU %u\n", cpu_policy->cpu);
}
out:
cpufreq_cpu_put(cpu_policy);
return ret;
}
/**
* cpufreq_resume - restore proper CPU frequency handling after resume
* cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
*
* 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
* 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
@ -1391,31 +1387,23 @@ out:
* what we believe it to be. This is a bit later than when it
* should be, but nonethteless it's better than calling
* cpufreq_driver->get() here which might re-enable interrupts...
*
* This function is only executed for the boot CPU. The other CPUs have not
* been turned on yet.
*/
static int cpufreq_resume(struct sys_device *sysdev)
static void cpufreq_bp_resume(void)
{
int ret = 0;
int cpu = sysdev->id;
int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy;
dprintk("resuming cpu %u\n", cpu);
if (!cpu_online(cpu))
return 0;
/* we may be lax here as interrupts are off. Nonetheless
* we need to grab the correct cpu policy, as to check
* whether we really run on this CPU.
*/
/* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
return -EINVAL;
/* only handle each CPU group once */
if (unlikely(cpu_policy->cpu != cpu))
goto fail;
return;
if (cpufreq_driver->resume) {
ret = cpufreq_driver->resume(cpu_policy);
@ -1430,14 +1418,11 @@ static int cpufreq_resume(struct sys_device *sysdev)
fail:
cpufreq_cpu_put(cpu_policy);
return ret;
}
static struct sysdev_driver cpufreq_sysdev_driver = {
.add = cpufreq_add_dev,
.remove = cpufreq_remove_dev,
.suspend = cpufreq_suspend,
.resume = cpufreq_resume,
static struct syscore_ops cpufreq_syscore_ops = {
.suspend = cpufreq_bp_suspend,
.resume = cpufreq_bp_resume,
};
@ -2002,6 +1987,7 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq",
&cpu_sysdev_class.kset.kobj);
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}

View File

@ -36,7 +36,7 @@
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
@ -3135,7 +3135,7 @@ static void iommu_flush_all(void)
}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
static int iommu_suspend(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@ -3175,7 +3175,7 @@ nomem:
return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
static void iommu_resume(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@ -3183,7 +3183,7 @@ static int iommu_resume(struct sys_device *dev)
if (init_iommu_hw()) {
WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return -EIO;
return;
}
for_each_active_iommu(iommu, drhd) {
@ -3204,40 +3204,20 @@ static int iommu_resume(struct sys_device *dev)
for_each_active_iommu(iommu, drhd)
kfree(iommu->iommu_state);
return 0;
}
static struct sysdev_class iommu_sysclass = {
.name = "iommu",
static struct syscore_ops iommu_syscore_ops = {
.resume = iommu_resume,
.suspend = iommu_suspend,
};
static struct sys_device device_iommu = {
.cls = &iommu_sysclass,
};
static int __init init_iommu_sysfs(void)
static void __init init_iommu_pm_ops(void)
{
int error;
error = sysdev_class_register(&iommu_sysclass);
if (error)
return error;
error = sysdev_register(&device_iommu);
if (error)
sysdev_class_unregister(&iommu_sysclass);
return error;
register_syscore_ops(&iommu_syscore_ops);
}
#else
static int __init init_iommu_sysfs(void)
{
return 0;
}
static inline void init_iommu_pm_ops(void) { }
#endif /* CONFIG_PM */
/*
@ -3320,7 +3300,7 @@ int __init intel_iommu_init(void)
#endif
dma_ops = &intel_dma_ops;
init_iommu_sysfs();
init_iommu_pm_ops();
register_iommu(&intel_iommu_ops);

View File

@ -633,8 +633,12 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
/* drivers/base/sys.c */
extern void sysdev_shutdown(void);
#else
static inline void sysdev_shutdown(void) { }
#endif
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);

View File

@ -529,13 +529,19 @@ struct dev_power_domain {
*/
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
extern int sysdev_suspend(pm_message_t state);
extern int sysdev_resume(void);
#else
static inline int sysdev_suspend(pm_message_t state) { return 0; }
static inline int sysdev_resume(void) { return 0; }
#endif
extern void device_pm_lock(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void device_pm_unlock(void);
extern int sysdev_suspend(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);

View File

@ -33,12 +33,13 @@ struct sysdev_class {
const char *name;
struct list_head drivers;
struct sysdev_class_attribute **attrs;
struct kset kset;
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
/* Default operations for these types of devices */
int (*shutdown)(struct sys_device *);
int (*suspend)(struct sys_device *, pm_message_t state);
int (*resume)(struct sys_device *);
struct kset kset;
#endif
};
struct sysdev_class_attribute {
@ -76,9 +77,11 @@ struct sysdev_driver {
struct list_head entry;
int (*add)(struct sys_device *);
int (*remove)(struct sys_device *);
#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
int (*shutdown)(struct sys_device *);
int (*suspend)(struct sys_device *, pm_message_t state);
int (*resume)(struct sys_device *);
#endif
};

View File

@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
@ -597,13 +597,12 @@ static struct timespec timekeeping_suspend_time;
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
* @dev: unused
*
* This is for the generic clocksource timekeeping.
* xtime/wall_to_monotonic/jiffies/etc are
* still managed by arch specific suspend/resume code.
*/
static int timekeeping_resume(struct sys_device *dev)
static void timekeeping_resume(void)
{
unsigned long flags;
struct timespec ts;
@ -632,11 +631,9 @@ static int timekeeping_resume(struct sys_device *dev)
/* Resume hrtimers */
hres_timers_resume();
return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
static int timekeeping_suspend(void)
{
unsigned long flags;
@ -654,26 +651,18 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
.name = "timekeeping",
static struct syscore_ops timekeeping_syscore_ops = {
.resume = timekeeping_resume,
.suspend = timekeeping_suspend,
};
static struct sys_device device_timer = {
.id = 0,
.cls = &timekeeping_sysclass,
};
static int __init timekeeping_init_device(void)
static int __init timekeeping_init_ops(void)
{
int error = sysdev_class_register(&timekeeping_sysclass);
if (!error)
error = sysdev_register(&device_timer);
return error;
register_syscore_ops(&timekeeping_syscore_ops);
return 0;
}
device_initcall(timekeeping_init_device);
device_initcall(timekeeping_init_ops);
/*
* If the error is already larger, we look ahead even further

View File

@ -30,7 +30,7 @@
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
@ -2446,33 +2446,26 @@ static void kvm_exit_debug(void)
debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
static int kvm_suspend(void)
{
if (kvm_usage_count)
hardware_disable_nolock(NULL);
return 0;
}
static int kvm_resume(struct sys_device *dev)
static void kvm_resume(void)
{
if (kvm_usage_count) {
WARN_ON(raw_spin_is_locked(&kvm_lock));
hardware_enable_nolock(NULL);
}
return 0;
}
static struct sysdev_class kvm_sysdev_class = {
.name = "kvm",
static struct syscore_ops kvm_syscore_ops = {
.suspend = kvm_suspend,
.resume = kvm_resume,
};
static struct sys_device kvm_sysdev = {
.id = 0,
.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;
@ -2556,14 +2549,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_2;
register_reboot_notifier(&kvm_reboot_notifier);
r = sysdev_class_register(&kvm_sysdev_class);
if (r)
goto out_free_3;
r = sysdev_register(&kvm_sysdev);
if (r)
goto out_free_4;
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
@ -2571,7 +2556,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
0, NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_5;
goto out_free_3;
}
r = kvm_async_pf_init();
@ -2588,6 +2573,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_unreg;
}
register_syscore_ops(&kvm_syscore_ops);
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
@ -2599,10 +2586,6 @@ out_unreg:
kvm_async_pf_deinit();
out_free:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
sysdev_unregister(&kvm_sysdev);
out_free_4:
sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
@ -2630,8 +2613,7 @@ void kvm_exit(void)
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();
sysdev_unregister(&kvm_sysdev);
sysdev_class_unregister(&kvm_sysdev_class);
unregister_syscore_ops(&kvm_syscore_ops);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable_nolock, NULL, 1);