Merge git://git.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (40 commits)
  x86: HPET add another ICH7 PCI id
  x86: HPET force enable ICH5 suspend/resume fix
  x86: HPET force enable for ICH5
  x86: HPET try to activate force detected hpet
  x86: HPET force enable on ICH7 and later
  x86: HPET restructure hpet code for hpet force enable
  clock events: allow replacement of broadcast timer
  i386/x8664: cleanup the shared hpet code
  i386: Remove the useless #ifdef in i8253.h
  ACPI: remove the now unused ifdef code
  jiffies: remove unused macros
  x86_64: cleanup apic.c after clock events switch
  x86_64: remove now unused code
  x86: unify timex.h variants
  x86: kill 8253pit.h
  x86: disable apic timer for AMD C1E enabled CPUs
  x86: Fix irq0 / local apic timer accounting
  x86_64: convert to clock events
  x86_64: Add (not yet used) clock event functions
  x86_64: prepare idle loop for dynamic ticks
  ...
commit 57c5b9998e
Linus Torvalds, 2007-10-12 15:39:39 -07:00
51 changed files with 1309 additions and 1265 deletions
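For orientation before the per-file diffs: the common thread of this merge is moving the x86-64 timer code onto the clockevents framework, and the lapic, HPET and Geode MFGPT code below all follow the same registration pattern. The following is a minimal, hypothetical sketch of that pattern against the 2.6.23-era clockevents API; the example_* names are illustrative only and are not part of this merge.

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/time.h>

static void example_set_mode(enum clock_event_mode mode,
                             struct clock_event_device *evt)
{
        /* program the hardware for periodic, oneshot or shutdown operation */
}

static int example_set_next_event(unsigned long delta,
                                  struct clock_event_device *evt)
{
        /* arm the hardware to fire 'delta' device ticks from now */
        return 0;
}

static struct clock_event_device example_clockevent = {
        .name           = "example",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_mode       = example_set_mode,
        .set_next_event = example_set_next_event,
        .shift          = 32,
        .rating         = 100,
};

static void __init example_clockevent_register(unsigned long freq_hz)
{
        /* scaled mult plus min/max delta, as the lapic/HPET/MFGPT code does */
        example_clockevent.mult = div_sc(freq_hz, NSEC_PER_SEC, 32);
        example_clockevent.max_delta_ns =
                clockevent_delta2ns(0x7FFFFFFF, &example_clockevent);
        example_clockevent.min_delta_ns =
                clockevent_delta2ns(0xF, &example_clockevent);
        example_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
        clockevents_register_device(&example_clockevent);
}

Once registered, the tick core calls back through evt->event_handler from the device's interrupt handler, which is exactly what mfgpt_tick() and smp_local_timer_interrupt() do in the diffs below.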


@@ -1009,6 +1009,10 @@ and is between 256 and 4096 characters. It is defined in the file
 meye.*= [HW] Set MotionEye Camera parameters
 See Documentation/video4linux/meye.txt.
+mfgpt_irq= [IA-32] Specify the IRQ to use for the
+ Multi-Function General Purpose Timers on AMD Geode
+ platforms.
 mga= [HW,DRM]
 mousedev.tap_time=
@@ -1160,6 +1164,9 @@ and is between 256 and 4096 characters. It is defined in the file
 nomce [X86-32] Machine Check Exception
+nomfgpt [X86-32] Disable Multi-Function General Purpose
+ Timer usage (for AMD Geode machines).
 noreplace-paravirt [X86-32,PV_OPS] Don't patch paravirt_ops
 noreplace-smp [X86-32,SMP] Don't replace SMP instructions


@@ -1206,6 +1206,16 @@ config SCx200HR_TIMER
 processor goes idle (as is done by the scheduler). The
 other workaround is idle=poll boot option.
+config GEODE_MFGPT_TIMER
+ bool "Geode Multi-Function General Purpose Timer (MFGPT) events"
+ depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
+ default y
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chip for the geode.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
 config K8_NB
 def_bool y
 depends on AGP_AMD64


@@ -7,7 +7,7 @@ extra-y := head_32.o init_task_32.o vmlinux.lds
 obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
 ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
 pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
-quirks.o i8237.o topology.o alternative.o i8253_32.o tsc_32.o
+quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += cpu/
@@ -37,9 +37,9 @@ obj-$(CONFIG_EFI) += efi_32.o efi_stub_32.o
 obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
 obj-$(CONFIG_VM86) += vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_HPET_TIMER) += hpet_32.o
+obj-$(CONFIG_HPET_TIMER) += hpet.o
 obj-$(CONFIG_K8_NB) += k8.o
-obj-$(CONFIG_MGEODE_LX) += geode_32.o
+obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o
 obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_PARAVIRT) += paravirt_32.o


@@ -8,8 +8,8 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
 ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
 x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
 setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
-pci-dma_64.o pci-nommu_64.o alternative.o hpet_64.o tsc_64.o bugs_64.o \
-perfctr-watchdog.o
+pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
+perfctr-watchdog.o i8253.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o


@@ -25,6 +25,7 @@
 #include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/clockchips.h>
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -39,12 +40,9 @@
 #include <asm/hpet.h>
 #include <asm/apic.h>
-int apic_mapped;
 int apic_verbosity;
-int apic_runs_main_timer;
-int apic_calibrate_pmtmr __initdata;
-int disable_apic_timer __initdata;
+int disable_apic_timer __cpuinitdata;
+static int apic_calibrate_pmtmr __initdata;
 /* Local APIC timer works in C2? */
 int local_apic_timer_c2_ok;
@@ -56,14 +54,78 @@ static struct resource lapic_resource = {
 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
 };
-/*
- * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
- * IPIs in place of local APIC timers
- */
-static cpumask_t timer_interrupt_broadcast_ipi_mask;
-/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer __read_mostly = 0;
+static unsigned int calibration_result;
+static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
static struct clock_event_device lapic_clockevent = {
.name = "lapic",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
.shift = 32,
.set_mode = lapic_timer_setup,
.set_next_event = lapic_next_event,
.broadcast = lapic_timer_broadcast,
.rating = 100,
.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static int lapic_next_event(unsigned long delta,
struct clock_event_device *evt)
{
apic_write(APIC_TMICT, delta);
return 0;
}
static void lapic_timer_setup(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned long flags;
unsigned int v;
/* Lapic used as dummy for broadcast ? */
if (evt->features & CLOCK_EVT_FEAT_DUMMY)
return;
local_irq_save(flags);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
__setup_APIC_LVTT(calibration_result,
mode != CLOCK_EVT_MODE_PERIODIC, 1);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
break;
case CLOCK_EVT_MODE_RESUME:
/* Nothing to do here */
break;
}
local_irq_restore(flags);
}
/*
* Local APIC timer broadcast function
*/
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
static void apic_pm_activate(void);
@@ -184,7 +246,10 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 apic_write(APIC_SPIV, value);
 if (!virt_wire_setup) {
-/* For LVT0 make it edge triggered, active high, external and enabled */
+/*
+ * For LVT0 make it edge triggered, active high,
+ * external and enabled
+ */
 value = apic_read(APIC_LVT0);
 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
@@ -420,10 +485,12 @@ void __cpuinit setup_local_APIC (void)
 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
 if (!smp_processor_id() && !value) {
 value = APIC_DM_EXTINT;
-apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id());
+apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
+ smp_processor_id());
 } else {
 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
-apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
+apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
+ smp_processor_id());
 }
 apic_write(APIC_LVT0, value);
@@ -706,8 +773,8 @@ void __init init_apic_mappings(void)
 apic_phys = mp_lapic_addr;
 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-apic_mapped = 1;
-apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
+apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+ APIC_BASE, apic_phys);
 /* Put local APIC into the resource map. */
 lapic_resource.start = apic_phys;
@@ -730,12 +797,14 @@ void __init init_apic_mappings(void)
 if (smp_found_config) {
 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
 } else {
-ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ioapic_phys = (unsigned long)
+ alloc_bootmem_pages(PAGE_SIZE);
 ioapic_phys = __pa(ioapic_phys);
 }
 set_fixmap_nocache(idx, ioapic_phys);
-apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
- __fix_to_virt(idx), ioapic_phys);
+apic_printk(APIC_VERBOSE,
+ "mapped IOAPIC to %016lx (%016lx)\n",
+ __fix_to_virt(idx), ioapic_phys);
 idx++;
 if (ioapic_res != NULL) {
@@ -758,16 +827,14 @@ void __init init_apic_mappings(void)
 * P5 APIC double write bug.
 */
-#define APIC_DIVISOR 16
-static void __setup_APIC_LVTT(unsigned int clocks)
+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 {
 unsigned int lvtt_value, tmp_value;
-int cpu = smp_processor_id();
-lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
-if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
+lvtt_value = LOCAL_TIMER_VECTOR;
+if (!oneshot)
+ lvtt_value |= APIC_LVT_TIMER_PERIODIC;
+if (!irqen)
 lvtt_value |= APIC_LVT_MASKED;
 apic_write(APIC_LVTT, lvtt_value);
@@ -780,44 +847,18 @@ static void __setup_APIC_LVTT(unsigned int clocks)
 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
 | APIC_TDR_DIV_16);
-apic_write(APIC_TMICT, clocks/APIC_DIVISOR);
+if (!oneshot)
+ apic_write(APIC_TMICT, clocks);
 }
-static void setup_APIC_timer(unsigned int clocks)
+static void setup_APIC_timer(void)
 {
-unsigned long flags;
-local_irq_save(flags);
+struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+memcpy(levt, &lapic_clockevent, sizeof(*levt));
+levt->cpumask = cpumask_of_cpu(smp_processor_id());
+clockevents_register_device(levt);
-/* wait for irq slice */
-if (hpet_address && hpet_use_timer) {
-u32 trigger = hpet_readl(HPET_T0_CMP);
-while (hpet_readl(HPET_T0_CMP) == trigger)
-/* do nothing */ ;
-} else {
-int c1, c2;
-outb_p(0x00, 0x43);
-c2 = inb_p(0x40);
-c2 |= inb_p(0x40) << 8;
-do {
-c1 = c2;
-outb_p(0x00, 0x43);
-c2 = inb_p(0x40);
-c2 |= inb_p(0x40) << 8;
-} while (c2 - c1 < 300);
-}
-__setup_APIC_LVTT(clocks);
-/* Turn off PIT interrupt if we use APIC timer as main timer.
-Only works with the PM timer right now
-TBD fix it for HPET too. */
-if ((pmtmr_ioport != 0) &&
-smp_processor_id() == boot_cpu_id &&
-apic_runs_main_timer == 1 &&
-!cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
-stop_timer_interrupt();
-apic_runs_main_timer++;
-}
-local_irq_restore(flags);
 }
 /*
@@ -835,17 +876,22 @@ static void setup_APIC_timer(unsigned int clocks)
 #define TICK_COUNT 100000000
-static int __init calibrate_APIC_clock(void)
+static void __init calibrate_APIC_clock(void)
 {
 unsigned apic, apic_start;
 unsigned long tsc, tsc_start;
 int result;
+local_irq_disable();
 /*
 * Put whatever arbitrary (but long enough) timeout
 * value into the APIC clock, we just want to get the
 * counter running for calibration.
+*
+* No interrupt enable !
 */
-__setup_APIC_LVTT(4000000000);
+__setup_APIC_LVTT(250000000, 0, 0);
 apic_start = apic_read(APIC_TMCCT);
 #ifdef CONFIG_X86_PM_TIMER
@ -867,123 +913,62 @@ static int __init calibrate_APIC_clock(void)
result = (apic_start - apic) * 1000L * tsc_khz / result = (apic_start - apic) * 1000L * tsc_khz /
(tsc - tsc_start); (tsc - tsc_start);
} }
printk("result %d\n", result);
local_irq_enable();
printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
result / 1000 / 1000, result / 1000 % 1000); result / 1000 / 1000, result / 1000 % 1000);
return result * APIC_DIVISOR / HZ; /* Calculate the scaled math multiplication factor */
} lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
static unsigned int calibration_result; calibration_result = result / HZ;
}
void __init setup_boot_APIC_clock (void) void __init setup_boot_APIC_clock (void)
{ {
/*
* The local apic timer can be disabled via the kernel commandline.
* Register the lapic timer as a dummy clock event source on SMP
* systems, so the broadcast mechanism is used. On UP systems simply
* ignore it.
*/
if (disable_apic_timer) { if (disable_apic_timer) {
printk(KERN_INFO "Disabling APIC timer\n"); printk(KERN_INFO "Disabling APIC timer\n");
/* No broadcast on UP ! */
if (num_possible_cpus() > 1)
setup_APIC_timer();
return; return;
} }
printk(KERN_INFO "Using local APIC timer interrupts.\n"); printk(KERN_INFO "Using local APIC timer interrupts.\n");
using_apic_timer = 1; calibrate_APIC_clock();
local_irq_disable();
calibration_result = calibrate_APIC_clock();
/* /*
* Now set up the timer for real. * If nmi_watchdog is set to IO_APIC, we need the
* PIT/HPET going. Otherwise register lapic as a dummy
* device.
*/ */
setup_APIC_timer(calibration_result); if (nmi_watchdog != NMI_IO_APIC)
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
else
printk(KERN_WARNING "APIC timer registered as dummy,"
" due to nmi_watchdog=1!\n");
local_irq_enable(); setup_APIC_timer();
} }
void __cpuinit setup_secondary_APIC_clock(void) void __cpuinit setup_secondary_APIC_clock(void)
{ {
local_irq_disable(); /* FIXME: Do we need this? --RR */ setup_APIC_timer();
setup_APIC_timer(calibration_result);
local_irq_enable();
} }
void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
v = apic_read(APIC_LVTT);
/*
* When an illegal vector value (0-15) is written to an LVT
* entry and delivery mode is Fixed, the APIC may signal an
* illegal vector error, with out regard to whether the mask
* bit is set or whether an interrupt is actually seen on input.
*
* Boot sequence might call this function when the LVTT has
* '0' vector value. So make sure vector field is set to
* valid value.
*/
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
}
}
void enable_APIC_timer(void)
{
int cpu = smp_processor_id();
if (using_apic_timer &&
!cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
unsigned long v;
v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
}
}
void switch_APIC_timer_to_ipi(void *cpumask)
{
cpumask_t mask = *(cpumask_t *)cpumask;
int cpu = smp_processor_id();
if (cpu_isset(cpu, mask) &&
!cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
disable_APIC_timer();
cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
}
}
EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
void smp_send_timer_broadcast_ipi(void)
{
int cpu = smp_processor_id();
cpumask_t mask;
cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
if (cpu_isset(cpu, mask)) {
cpu_clear(cpu, mask);
add_pda(apic_timer_irqs, 1);
smp_local_timer_interrupt();
}
if (!cpus_empty(mask)) {
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
}
}
void switch_ipi_to_APIC_timer(void *cpumask)
{
cpumask_t mask = *(cpumask_t *)cpumask;
int cpu = smp_processor_id();
if (cpu_isset(cpu, mask) &&
cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
enable_APIC_timer();
}
}
EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
 int setup_profiling_timer(unsigned int multiplier)
 {
 return -EINVAL;
@@ -997,8 +982,6 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
 apic_write(reg, v);
 }
-#undef APIC_DIVISOR
 /*
 * Local timer interrupt handler. It does both profiling and
 * process statistics/rescheduling.
@@ -1011,22 +994,34 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
 void smp_local_timer_interrupt(void)
 {
-profile_tick(CPU_PROFILING);
-#ifdef CONFIG_SMP
-update_process_times(user_mode(get_irq_regs()));
-#endif
-if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
-main_timer_handler();
+int cpu = smp_processor_id();
+struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
 /*
-* We take the 'long' return path, and there every subsystem
-* grabs the appropriate locks (kernel lock/ irq lock).
+* Normally we should not be here till LAPIC has been initialized but
+* in some cases like kdump, its possible that there is a pending LAPIC
+* timer interrupt from previous kernel's context and is delivered in
+* new kernel the moment interrupts are enabled.
 *
-* We might want to decouple profiling from the 'long path',
-* and do the profiling totally in assembly.
-*
-* Currently this isn't too much of an issue (performance wise),
-* we can take more than 100K local irqs per second on a 100 MHz P5.
+* Interrupts are enabled early and LAPIC is setup much later, hence
+* its possible that when we get here evt->event_handler is NULL.
+* Check for event_handler being NULL and discard the interrupt as
+* spurious.
 */
+if (!evt->event_handler) {
+printk(KERN_WARNING
+"Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+/* Switch it off */
+lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+return;
+}
+/*
+* the NMI deadlock-detector uses this.
+*/
+add_pda(apic_timer_irqs, 1);
+evt->event_handler(evt);
 }
 /*
@@ -1041,11 +1036,6 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
 {
 struct pt_regs *old_regs = set_irq_regs(regs);
-/*
-* the NMI deadlock-detector uses this.
-*/
-add_pda(apic_timer_irqs, 1);
 /*
 * NOTE! We'd better ACK the irq immediately,
 * because timer handling can be slow.
@@ -1225,29 +1215,13 @@ static __init int setup_noapictimer(char *str)
 disable_apic_timer = 1;
 return 1;
 }
-__setup("noapictimer", setup_noapictimer);
-static __init int setup_apicmaintimer(char *str)
-{
-apic_runs_main_timer = 1;
-nohpet = 1;
-return 1;
-}
-__setup("apicmaintimer", setup_apicmaintimer);
-static __init int setup_noapicmaintimer(char *str)
-{
-apic_runs_main_timer = -1;
-return 1;
-}
-__setup("noapicmaintimer", setup_noapicmaintimer);
 static __init int setup_apicpmtimer(char *s)
 {
 apic_calibrate_pmtmr = 1;
 notsc_setup(NULL);
-return setup_apicmaintimer(NULL);
+return 0;
 }
 __setup("apicpmtimer", setup_apicpmtimer);
+__setup("noapictimer", setup_noapictimer);


@@ -145,10 +145,14 @@ EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
 static int __init geode_southbridge_init(void)
 {
+int timers;
 if (!is_geode())
 return -ENODEV;
 init_lbars();
+timers = geode_mfgpt_detect();
+printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers);
 return 0;
 }


@@ -1,5 +1,6 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
@@ -7,11 +8,11 @@
 #include <linux/pm.h>
 #include <linux/delay.h>
+#include <asm/fixmap.h>
 #include <asm/hpet.h>
+#include <asm/i8253.h>
 #include <asm/io.h>
-extern struct clock_event_device *global_clock_event;
 #define HPET_MASK CLOCKSOURCE_MASK(32)
 #define HPET_SHIFT 22
@@ -22,9 +23,9 @@ extern struct clock_event_device *global_clock_event;
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
 unsigned long hpet_address;
-static void __iomem * hpet_virt_address;
+static void __iomem *hpet_virt_address;
-static inline unsigned long hpet_readl(unsigned long a)
+unsigned long hpet_readl(unsigned long a)
 {
 return readl(hpet_virt_address + a);
 }
@@ -34,6 +35,36 @@
 writel(d, hpet_virt_address + a);
 }
#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
static inline void hpet_set_mapping(void)
{
set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
hpet_virt_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
}
static inline void hpet_clear_mapping(void)
{
hpet_virt_address = NULL;
}
#else
static inline void hpet_set_mapping(void)
{
hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}
static inline void hpet_clear_mapping(void)
{
iounmap(hpet_virt_address);
hpet_virt_address = NULL;
}
#endif
 /*
 * HPET command line enable / disable
 */
@@ -49,6 +80,13 @@ static int __init hpet_setup(char* str)
 }
 __setup("hpet=", hpet_setup);
+static int __init disable_hpet(char *str)
+{
+boot_hpet_disable = 1;
+return 1;
+}
+__setup("nohpet", disable_hpet);
 static inline int is_hpet_capable(void)
 {
 return (!boot_hpet_disable && hpet_address);
@@ -83,7 +121,7 @@ static void hpet_reserve_platform_timers(unsigned long id)
 memset(&hd, 0, sizeof (hd));
 hd.hd_phys_address = hpet_address;
-hd.hd_address = hpet_virt_address;
+hd.hd_address = hpet;
 hd.hd_nirqs = nrtimers;
 hd.hd_flags = HPET_DATA_PLATFORM;
 hpet_reserve_timer(&hd, 0);
@@ -111,9 +149,9 @@ static void hpet_reserve_platform_timers(unsigned long id) { }
 */
 static unsigned long hpet_period;
-static void hpet_set_mode(enum clock_event_mode mode,
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
 struct clock_event_device *evt);
-static int hpet_next_event(unsigned long delta,
+static int hpet_legacy_next_event(unsigned long delta,
 struct clock_event_device *evt);
 /*
@@ -122,10 +160,11 @@ static int hpet_next_event(unsigned long delta,
 static struct clock_event_device hpet_clockevent = {
 .name = "hpet",
 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-.set_mode = hpet_set_mode,
-.set_next_event = hpet_next_event,
+.set_mode = hpet_legacy_set_mode,
+.set_next_event = hpet_legacy_next_event,
 .shift = 32,
 .irq = 0,
+.rating = 50,
 };
 static void hpet_start_counter(void)
@@ -140,7 +179,18 @@ static void hpet_start_counter(void)
 hpet_writel(cfg, HPET_CFG);
 }
-static void hpet_enable_int(void)
+static void hpet_resume_device(void)
+{
+force_hpet_resume();
+}
+static void hpet_restart_counter(void)
+{
+hpet_resume_device();
+hpet_start_counter();
+}
+static void hpet_enable_legacy_int(void)
 {
 unsigned long cfg = hpet_readl(HPET_CFG);
@@ -149,7 +199,39 @@ static void hpet_enable_int(void)
 hpet_legacy_int_enabled = 1;
 }
-static void hpet_set_mode(enum clock_event_mode mode,
+static void hpet_legacy_clockevent_register(void)
{
uint64_t hpet_freq;
/* Start HPET legacy interrupts */
hpet_enable_legacy_int();
/*
* The period is a femto seconds value. We need to calculate the
* scaled math multiplication factor for nanosecond to hpet tick
* conversion.
*/
hpet_freq = 1000000000000000ULL;
do_div(hpet_freq, hpet_period);
hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
NSEC_PER_SEC, 32);
/* Calculate the min / max delta */
hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
&hpet_clockevent);
hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
&hpet_clockevent);
/*
* Start hpet with the boot cpu mask and make it
* global after the IO_APIC has been initialized.
*/
hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
clockevents_register_device(&hpet_clockevent);
global_clock_event = &hpet_clockevent;
printk(KERN_DEBUG "hpet clockevent registered\n");
}
static void hpet_legacy_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned long cfg, cmp, now;
@@ -190,12 +272,12 @@ static void hpet_set_mode(enum clock_event_mode mode,
 break;
 case CLOCK_EVT_MODE_RESUME:
-hpet_enable_int();
+hpet_enable_legacy_int();
 break;
 }
 }
-static int hpet_next_event(unsigned long delta,
+static int hpet_legacy_next_event(unsigned long delta,
 struct clock_event_device *evt)
 {
 unsigned long cnt;
@@ -215,6 +297,13 @@ static cycle_t read_hpet(void)
 return (cycle_t)hpet_readl(HPET_COUNTER);
 }
#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_hpet(void)
{
return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif
 static struct clocksource clocksource_hpet = {
 .name = "hpet",
 .rating = 250,
@@ -222,61 +311,17 @@ static struct clocksource clocksource_hpet = {
 .mask = HPET_MASK,
 .shift = HPET_SHIFT,
 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-.resume = hpet_start_counter,
+.resume = hpet_restart_counter,
+#ifdef CONFIG_X86_64
+.vread = vread_hpet,
+#endif
 };
-/*
- * Try to setup the HPET timer
- */
-int __init hpet_enable(void)
+static int hpet_clocksource_register(void)
 {
-unsigned long id;
-uint64_t hpet_freq;
 u64 tmp, start, now;
 cycle_t t1;
if (!is_hpet_capable())
return 0;
hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
/*
* Read the period and check for a sane value:
*/
hpet_period = hpet_readl(HPET_PERIOD);
if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
goto out_nohpet;
/*
* The period is a femto seconds value. We need to calculate the
* scaled math multiplication factor for nanosecond to hpet tick
* conversion.
*/
hpet_freq = 1000000000000000ULL;
do_div(hpet_freq, hpet_period);
hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
NSEC_PER_SEC, 32);
/* Calculate the min / max delta */
hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
&hpet_clockevent);
hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
&hpet_clockevent);
/*
* Read the HPET ID register to retrieve the IRQ routing
* information and the number of channels
*/
id = hpet_readl(HPET_ID);
#ifdef CONFIG_HPET_EMULATE_RTC
/*
* The legacy routing mode needs at least two channels, tick timer
* and the rtc emulation channel.
*/
if (!(id & HPET_ID_NUMBER))
goto out_nohpet;
#endif
 /* Start the counter */
 hpet_start_counter();
@@ -298,7 +343,7 @@ int __init hpet_enable(void)
 if (t1 == read_hpet()) {
 printk(KERN_WARNING
 "HPET counter not counting. HPET disabled\n");
-goto out_nohpet;
+return -ENODEV;
 }
 /* Initialize and register HPET clocksource
@@ -319,27 +364,84 @@ int __init hpet_enable(void)
 clocksource_register(&clocksource_hpet);
return 0;
}
/*
* Try to setup the HPET timer
*/
int __init hpet_enable(void)
{
unsigned long id;
if (!is_hpet_capable())
return 0;
hpet_set_mapping();
/*
* Read the period and check for a sane value:
*/
hpet_period = hpet_readl(HPET_PERIOD);
if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
goto out_nohpet;
/*
* Read the HPET ID register to retrieve the IRQ routing
* information and the number of channels
*/
id = hpet_readl(HPET_ID);
#ifdef CONFIG_HPET_EMULATE_RTC
/*
* The legacy routing mode needs at least two channels, tick timer
* and the rtc emulation channel.
*/
if (!(id & HPET_ID_NUMBER))
goto out_nohpet;
#endif
if (hpet_clocksource_register())
goto out_nohpet;
 if (id & HPET_ID_LEGSUP) {
-hpet_enable_int();
-hpet_reserve_platform_timers(id);
-/*
-* Start hpet with the boot cpu mask and make it
-* global after the IO_APIC has been initialized.
-*/
-hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
-clockevents_register_device(&hpet_clockevent);
-global_clock_event = &hpet_clockevent;
+hpet_legacy_clockevent_register();
 return 1;
 }
 return 0;
 out_nohpet:
-iounmap(hpet_virt_address);
-hpet_virt_address = NULL;
+hpet_clear_mapping();
 boot_hpet_disable = 1;
 return 0;
 }
/*
* Needs to be late, as the reserve_timer code calls kalloc !
*
* Not a problem on i386 as hpet_enable is called from late_time_init,
* but on x86_64 it is necessary !
*/
static __init int hpet_late_init(void)
{
if (boot_hpet_disable)
return -ENODEV;
if (!hpet_address) {
if (!force_hpet_address)
return -ENODEV;
hpet_address = force_hpet_address;
hpet_enable();
if (!hpet_virt_address)
return -ENODEV;
}
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
return 0;
}
fs_initcall(hpet_late_init);
#ifdef CONFIG_HPET_EMULATE_RTC


@ -1,493 +0,0 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/hpet.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#define HPET_MASK 0xFFFFFFFF
#define HPET_SHIFT 22
/* FSEC = 10^-15 NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000
int nohpet __initdata;
unsigned long hpet_address;
unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
int hpet_use_timer; /* Use counter of hpet for time keeping,
* otherwise PIT
*/
#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
struct hpet_data hd;
unsigned int ntimer;
if (!hpet_address)
return 0;
memset(&hd, 0, sizeof(hd));
ntimer = hpet_readl(HPET_ID);
ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
ntimer++;
/*
* Register with driver.
* Timer0 and Timer1 is used by platform.
*/
hd.hd_phys_address = hpet_address;
hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
hd.hd_nirqs = ntimer;
hd.hd_flags = HPET_DATA_PLATFORM;
hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
hpet_reserve_timer(&hd, 1);
#endif
hd.hd_irq[0] = HPET_LEGACY_8254;
hd.hd_irq[1] = HPET_LEGACY_RTC;
if (ntimer > 2) {
struct hpet *hpet;
struct hpet_timer *timer;
int i;
hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
timer = &hpet->hpet_timers[2];
for (i = 2; i < ntimer; timer++, i++)
hd.hd_irq[i] = (timer->hpet_config &
Tn_INT_ROUTE_CNF_MASK) >>
Tn_INT_ROUTE_CNF_SHIFT;
}
hpet_alloc(&hd);
return 0;
}
fs_initcall(late_hpet_init);
#endif
int hpet_timer_stop_set_go(unsigned long tick)
{
unsigned int cfg;
/*
* Stop the timers and reset the main counter.
*/
cfg = hpet_readl(HPET_CFG);
cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
hpet_writel(cfg, HPET_CFG);
hpet_writel(0, HPET_COUNTER);
hpet_writel(0, HPET_COUNTER + 4);
/*
* Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
* and period also hpet_tick.
*/
if (hpet_use_timer) {
hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT, HPET_T0_CFG);
hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
cfg |= HPET_CFG_LEGACY;
}
/*
* Go!
*/
cfg |= HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
return 0;
}
static cycle_t read_hpet(void)
{
return (cycle_t)hpet_readl(HPET_COUNTER);
}
static cycle_t __vsyscall_fn vread_hpet(void)
{
return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
.read = read_hpet,
.mask = (cycle_t)HPET_MASK,
.mult = 0, /* set below */
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.vread = vread_hpet,
};
int __init hpet_arch_init(void)
{
unsigned int id;
u64 tmp;
if (!hpet_address)
return -1;
set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
/*
* Read the period, compute tick and quotient.
*/
id = hpet_readl(HPET_ID);
if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
return -1;
hpet_period = hpet_readl(HPET_PERIOD);
if (hpet_period < 100000 || hpet_period > 100000000)
return -1;
hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
hpet_use_timer = (id & HPET_ID_LEGSUP);
/*
* hpet period is in femto seconds per cycle
* so we need to convert this to ns/cyc units
* aproximated by mult/2^shift
*
* fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
* fsec/cyc * 1ns/1000000fsec * 2^shift = mult
* fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
* (fsec/cyc << shift)/1000000 = mult
* (hpet_period << shift)/FSEC_PER_NSEC = mult
*/
tmp = (u64)hpet_period << HPET_SHIFT;
do_div(tmp, FSEC_PER_NSEC);
clocksource_hpet.mult = (u32)tmp;
clocksource_register(&clocksource_hpet);
return hpet_timer_stop_set_go(hpet_tick);
}
int hpet_reenable(void)
{
return hpet_timer_stop_set_go(hpet_tick);
}
/*
* calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
* it to the HPET timer of known frequency.
*/
#define TICK_COUNT 100000000
#define SMI_THRESHOLD 50000
#define MAX_TRIES 5
/*
* Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
* occurs between the reads of the hpet & TSC.
*/
static void __init read_hpet_tsc(int *hpet, int *tsc)
{
int tsc1, tsc2, hpet1, i;
for (i = 0; i < MAX_TRIES; i++) {
tsc1 = get_cycles_sync();
hpet1 = hpet_readl(HPET_COUNTER);
tsc2 = get_cycles_sync();
if ((tsc2 - tsc1) < SMI_THRESHOLD)
break;
}
*hpet = hpet1;
*tsc = tsc2;
}
unsigned int __init hpet_calibrate_tsc(void)
{
int tsc_start, hpet_start;
int tsc_now, hpet_now;
unsigned long flags;
local_irq_save(flags);
read_hpet_tsc(&hpet_start, &tsc_start);
do {
local_irq_disable();
read_hpet_tsc(&hpet_now, &tsc_now);
local_irq_restore(flags);
} while ((tsc_now - tsc_start) < TICK_COUNT &&
(hpet_now - hpet_start) < TICK_COUNT);
return (tsc_now - tsc_start) * 1000000000L
/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
* is enabled, we support RTC interrupt functionality in software.
* RTC has 3 kinds of interrupts:
* 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
* is updated
* 2) Alarm Interrupt - generate an interrupt at a specific time of day
* 3) Periodic Interrupt - generate periodic interrupt, with frequencies
* 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
* (1) and (2) above are implemented using polling at a frequency of
* 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
* overhead. (DEFAULT_RTC_INT_FREQ)
* For (3), we use interrupts at 64Hz or user specified periodic
* frequency, whichever is higher.
*/
#include <linux/rtc.h>
#define DEFAULT_RTC_INT_FREQ 64
#define RTC_NUM_INTS 1
static unsigned long UIE_on;
static unsigned long prev_update_sec;
static unsigned long AIE_on;
static struct rtc_time alarm_time;
static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;
static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
static unsigned int hpet_t1_cmp; /* cached comparator register */
int is_hpet_enabled(void)
{
return hpet_address != 0;
}
/*
* Timer 1 for RTC, we do not use periodic interrupt feature,
* even if HPET supports periodic interrupts on Timer 1.
* The reason being, to set up a periodic interrupt in HPET, we need to
* stop the main counter. And if we do that everytime someone diables/enables
* RTC, we will have adverse effect on main kernel timer running on Timer 0.
* So, for the time being, simulate the periodic interrupt in software.
*
* hpet_rtc_timer_init() is called for the first time and during subsequent
* interuppts reinit happens through hpet_rtc_timer_reinit().
*/
int hpet_rtc_timer_init(void)
{
unsigned int cfg, cnt;
unsigned long flags;
if (!is_hpet_enabled())
return 0;
/*
* Set the counter 1 and enable the interrupts.
*/
if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
hpet_rtc_int_freq = PIE_freq;
else
hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
local_irq_save(flags);
cnt = hpet_readl(HPET_COUNTER);
cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
hpet_writel(cnt, HPET_T1_CMP);
hpet_t1_cmp = cnt;
cfg = hpet_readl(HPET_T1_CFG);
cfg &= ~HPET_TN_PERIODIC;
cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
hpet_writel(cfg, HPET_T1_CFG);
local_irq_restore(flags);
return 1;
}
static void hpet_rtc_timer_reinit(void)
{
unsigned int cfg, cnt, ticks_per_int, lost_ints;
if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
cfg = hpet_readl(HPET_T1_CFG);
cfg &= ~HPET_TN_ENABLE;
hpet_writel(cfg, HPET_T1_CFG);
return;
}
if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
hpet_rtc_int_freq = PIE_freq;
else
hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
/* It is more accurate to use the comparator value than current count.*/
ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
hpet_t1_cmp += ticks_per_int;
hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
/*
* If the interrupt handler was delayed too long, the write above tries
* to schedule the next interrupt in the past and the hardware would
* not interrupt until the counter had wrapped around.
* So we have to check that the comparator wasn't set to a past time.
*/
cnt = hpet_readl(HPET_COUNTER);
if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
/* Make sure that, even with the time needed to execute
* this code, the next scheduled interrupt has been moved
* back to the future: */
lost_ints++;
hpet_t1_cmp += lost_ints * ticks_per_int;
hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
if (PIE_on)
PIE_count += lost_ints;
if (printk_ratelimit())
printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
hpet_rtc_int_freq);
}
}
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
* Otherwise do the necessary changes and return 1.
*/
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
if (!is_hpet_enabled())
return 0;
if (bit_mask & RTC_UIE)
UIE_on = 0;
if (bit_mask & RTC_PIE)
PIE_on = 0;
if (bit_mask & RTC_AIE)
AIE_on = 0;
return 1;
}
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
int timer_init_reqd = 0;
if (!is_hpet_enabled())
return 0;
if (!(PIE_on | AIE_on | UIE_on))
timer_init_reqd = 1;
if (bit_mask & RTC_UIE) {
UIE_on = 1;
}
if (bit_mask & RTC_PIE) {
PIE_on = 1;
PIE_count = 0;
}
if (bit_mask & RTC_AIE) {
AIE_on = 1;
}
if (timer_init_reqd)
hpet_rtc_timer_init();
return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
if (!is_hpet_enabled())
return 0;
alarm_time.tm_hour = hrs;
alarm_time.tm_min = min;
alarm_time.tm_sec = sec;
return 1;
}
int hpet_set_periodic_freq(unsigned long freq)
{
if (!is_hpet_enabled())
return 0;
PIE_freq = freq;
PIE_count = 0;
return 1;
}
int hpet_rtc_dropped_irq(void)
{
if (!is_hpet_enabled())
return 0;
return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
struct rtc_time curr_time;
unsigned long rtc_int_flag = 0;
int call_rtc_interrupt = 0;
hpet_rtc_timer_reinit();
if (UIE_on | AIE_on) {
rtc_get_rtc_time(&curr_time);
}
if (UIE_on) {
if (curr_time.tm_sec != prev_update_sec) {
/* Set update int info, call real rtc int routine */
call_rtc_interrupt = 1;
rtc_int_flag = RTC_UF;
prev_update_sec = curr_time.tm_sec;
}
}
if (PIE_on) {
PIE_count++;
if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
/* Set periodic int info, call real rtc int routine */
call_rtc_interrupt = 1;
rtc_int_flag |= RTC_PF;
PIE_count = 0;
}
}
if (AIE_on) {
if ((curr_time.tm_sec == alarm_time.tm_sec) &&
(curr_time.tm_min == alarm_time.tm_min) &&
(curr_time.tm_hour == alarm_time.tm_hour)) {
/* Set alarm int info, call real rtc int routine */
call_rtc_interrupt = 1;
rtc_int_flag |= RTC_AF;
}
}
if (call_rtc_interrupt) {
rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
rtc_interrupt(rtc_int_flag, dev_id);
}
return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
nohpet = 1;
return 1;
}
__setup("nohpet", nohpet_setup);


@@ -13,7 +13,6 @@
 #include <asm/delay.h>
 #include <asm/i8253.h>
 #include <asm/io.h>
-#include <asm/timer.h>
 DEFINE_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
@@ -120,6 +119,7 @@ void __init setup_pit_timer(void)
 global_clock_event = &pit_clockevent;
 }
+#ifndef CONFIG_X86_64
 /*
 * Since the PIT overflows every tick, its not very useful
 * to just read by itself. So use jiffies to emulate a free
@@ -204,3 +204,5 @@ static int __init init_pit_clocksource(void)
 return clocksource_register(&clocksource_pit);
 }
 arch_initcall(init_pit_clocksource);
+#endif


@@ -10,7 +10,6 @@
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
-#include <asm/8253pit.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/io.h>


@@ -444,46 +444,6 @@ void __init init_ISA_irqs (void)
 }
 }
static void setup_timer_hardware(void)
{
outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
outb_p(LATCH & 0xff , 0x40); /* LSB */
udelay(10);
outb(LATCH >> 8 , 0x40); /* MSB */
}
static int timer_resume(struct sys_device *dev)
{
setup_timer_hardware();
return 0;
}
void i8254_timer_resume(void)
{
setup_timer_hardware();
}
static struct sysdev_class timer_sysclass = {
set_kset_name("timer_pit"),
.resume = timer_resume,
};
static struct sys_device device_timer = {
.id = 0,
.cls = &timer_sysclass,
};
static int __init init_timer_sysfs(void)
{
int error = sysdev_class_register(&timer_sysclass);
if (!error)
error = sysdev_register(&device_timer);
return error;
}
device_initcall(init_timer_sysfs);
 void __init init_IRQ(void)
 {
 int i;
@@ -533,12 +493,6 @@ void __init init_IRQ(void)
 set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
 set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
/*
* Set the clock to HZ Hz, we already have a valid
* vector now:
*/
setup_timer_hardware();
 if (!acpi_ioapic)
 setup_irq(2, &irq2);
 }

arch/x86/kernel/mfgpt_32.c (new file, 362 lines)

@ -0,0 +1,362 @@
/*
* Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
*
* Copyright (C) 2006, Advanced Micro Devices, Inc.
* Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
*/
/*
* We are using the 32Khz input clock - its the only one that has the
* ranges we find desirable. The following table lists the suitable
* divisors and the associated hz, minimum interval
* and the maximum interval:
*
* Divisor Hz Min Delta (S) Max Delta (S)
* 1 32000 .0005 2.048
* 2 16000 .001 4.096
* 4 8000 .002 8.192
* 8 4000 .004 16.384
* 16 2000 .008 32.768
* 32 1000 .016 65.536
* 64 500 .032 131.072
* 128 250 .064 262.144
* 256 125 .128 524.288
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/geode.h>
#define F_AVAIL 0x01
static struct mfgpt_timer_t {
int flags;
struct module *owner;
} mfgpt_timers[MFGPT_MAX_TIMERS];
/* Selected from the table above */
#define MFGPT_DIVISOR 16
#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
#define MFGPT_HZ (32000 / MFGPT_DIVISOR)
#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
#ifdef CONFIG_GEODE_MFGPT_TIMER
static int __init mfgpt_timer_setup(void);
#else
#define mfgpt_timer_setup() (0)
#endif
/* Allow for disabling of MFGPTs */
static int disable;
static int __init mfgpt_disable(char *s)
{
disable = 1;
return 1;
}
__setup("nomfgpt", mfgpt_disable);
/*
* Check whether any MFGPTs are available for the kernel to use. In most
* cases, firmware that uses AMD's VSA code will claim all timers during
* bootup; we certainly don't want to take them if they're already in use.
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
int __init geode_mfgpt_detect(void)
{
int count = 0, i;
u16 val;
if (disable) {
printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n");
return 0;
}
for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
if (!(val & MFGPT_SETUP_SETUP)) {
mfgpt_timers[i].flags = F_AVAIL;
count++;
}
}
/* set up clock event device, if desired */
i = mfgpt_timer_setup();
return count;
}
int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
{
u32 msr, mask, value, dummy;
int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
return -EIO;
/*
* The register maps for these are described in sections 6.17.1.x of
* the AMD Geode CS5536 Companion Device Data Book.
*/
switch (event) {
case MFGPT_EVENT_RESET:
/*
* XXX: According to the docs, we cannot reset timers above
* 6; that is, resets for 7 and 8 will be ignored. Is this
* a problem? -dilinger
*/
msr = MFGPT_NR_MSR;
mask = 1 << (timer + 24);
break;
case MFGPT_EVENT_NMI:
msr = MFGPT_NR_MSR;
mask = 1 << (timer + shift);
break;
case MFGPT_EVENT_IRQ:
msr = MFGPT_IRQ_MSR;
mask = 1 << (timer + shift);
break;
default:
return -EIO;
}
rdmsr(msr, value, dummy);
if (enable)
value |= mask;
else
value &= ~mask;
wrmsr(msr, value, dummy);
return 0;
}
int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
{
u32 val, dummy;
int offset;
if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
return -EIO;
if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
return -EIO;
rdmsr(MSR_PIC_ZSEL_LOW, val, dummy);
offset = (timer % 4) * 4;
val &= ~((0xF << offset) | (0xF << (offset + 16)));
if (enable) {
val |= (irq & 0x0F) << (offset);
val |= (irq & 0x0F) << (offset + 16);
}
wrmsr(MSR_PIC_ZSEL_LOW, val, dummy);
return 0;
}
static int mfgpt_get(int timer, struct module *owner)
{
mfgpt_timers[timer].flags &= ~F_AVAIL;
mfgpt_timers[timer].owner = owner;
printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
return timer;
}
int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner)
{
int i;
if (!geode_get_dev_base(GEODE_DEV_MFGPT))
return -ENODEV;
if (timer >= MFGPT_MAX_TIMERS)
return -EIO;
if (timer < 0) {
/* Try to find an available timer */
for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
if (mfgpt_timers[i].flags & F_AVAIL)
return mfgpt_get(i, owner);
if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
break;
}
} else {
/* If they requested a specific timer, try to honor that */
if (mfgpt_timers[timer].flags & F_AVAIL)
return mfgpt_get(timer, owner);
}
/* No timers available - too bad */
return -1;
}
#ifdef CONFIG_GEODE_MFGPT_TIMER
/*
* The MFPGT timers on the CS5536 provide us with suitable timers to use
* as clock event sources - not as good as a HPET or APIC, but certainly
* better then the PIT. This isn't a general purpose MFGPT driver, but
* a simplified one designed specifically to act as a clock event source.
* For full details about the MFGPT, please consult the CS5536 data sheet.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
static u16 mfgpt_event_clock;
static int irq = 7;
static int __init mfgpt_setup(char *str)
{
get_option(&str, &irq);
return 1;
}
__setup("mfgpt_irq=", mfgpt_setup);
static inline void mfgpt_disable_timer(u16 clock)
{
u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP);
geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN);
}
static int mfgpt_next_event(unsigned long, struct clock_event_device *);
static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
static struct clock_event_device mfgpt_clockevent = {
.name = "mfgpt-timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = mfgpt_set_mode,
.set_next_event = mfgpt_next_event,
.rating = 250,
.cpumask = CPU_MASK_ALL,
.shift = 32
};
static inline void mfgpt_start_timer(u16 clock, u16 delta)
{
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}
static void mfgpt_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
mfgpt_disable_timer(mfgpt_event_clock);
if (mode == CLOCK_EVT_MODE_PERIODIC)
mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC);
mfgpt_tick_mode = mode;
}
static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
{
mfgpt_start_timer(mfgpt_event_clock, delta);
return 0;
}
/* Assume (foolishly?), that this interrupt was due to our tick */
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
{
if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
return IRQ_HANDLED;
/* Turn off the clock */
mfgpt_disable_timer(mfgpt_event_clock);
/* Clear the counter */
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
/* Restart the clock in periodic mode */
if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}
mfgpt_clockevent.event_handler(&mfgpt_clockevent);
return IRQ_HANDLED;
}
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
.mask = CPU_MASK_NONE,
.name = "mfgpt-timer"
};
static int __init mfgpt_timer_setup(void)
{
int timer, ret;
u16 val;
timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING,
THIS_MODULE);
if (timer < 0) {
printk(KERN_ERR
"mfgpt-timer: Could not allocate a MFPGT timer\n");
return -ENODEV;
}
mfgpt_event_clock = timer;
/* Set the clock scale and enable the event mode for CMP2 */
val = MFGPT_SCALE | (3 << 8);
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
/* Set up the IRQ on the MFGPT side */
if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) {
printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
return -EIO;
}
/* And register it with the kernel */
ret = setup_irq(irq, &mfgptirq);
if (ret) {
printk(KERN_ERR
"mfgpt-timer: Unable to set up the interrupt.\n");
goto err;
}
/* Set up the clock event */
mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
&mfgpt_clockevent);
mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
&mfgpt_clockevent);
printk(KERN_INFO
"mfgpt-timer: registering the MFGT timer as a clock event.\n");
clockevents_register_device(&mfgpt_clockevent);
return 0;
err:
geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq);
printk(KERN_ERR
"mfgpt-timer: Unable to set up the MFGPT clock source\n");
return -EIO;
}
#endif


@@ -353,7 +353,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active, when we have highres/dyntick on
 */
-sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0];
+sum = per_cpu(irq_stat, cpu).apic_timer_irqs +
+ per_cpu(irq_stat, cpu).irq0_irqs;
 /* if the none of the timers isn't firing, this cpu isn't doing much */
 if (!touched && last_irq_sums[cpu] == sum) {


@@ -329,7 +329,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 touched = 1;
 }
-sum = read_pda(apic_timer_irqs);
+sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
 if (__get_cpu_var(nmi_touch)) {
 __get_cpu_var(nmi_touch) = 0;
 touched = 1;


@@ -38,6 +38,7 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/tick.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -208,6 +209,8 @@ void cpu_idle (void)
 if (__get_cpu_var(cpu_idle_state))
 __get_cpu_var(cpu_idle_state) = 0;
+tick_nohz_stop_sched_tick();
 rmb();
 idle = pm_idle;
 if (!idle)
@@ -228,6 +231,7 @@ void cpu_idle (void)
 __exit_idle();
 }
+tick_nohz_restart_sched_tick();
 preempt_enable_no_resched();
 schedule();
 preempt_disable();


@@ -4,6 +4,8 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
+#include <asm/hpet.h>
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
@@ -47,3 +49,206 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
 #endif
#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;
static enum {
NONE_FORCE_HPET_RESUME,
OLD_ICH_FORCE_HPET_RESUME,
ICH_FORCE_HPET_RESUME
} force_hpet_resume_type;
static void __iomem *rcba_base;
static void ich_force_hpet_resume(void)
{
u32 val;
if (!force_hpet_address)
return;
if (rcba_base == NULL)
BUG();
/* read the Function Disable register, dword mode only */
val = readl(rcba_base + 0x3404);
if (!(val & 0x80)) {
/* HPET disabled in HPTC. Trying to enable */
writel(val | 0x80, rcba_base + 0x3404);
}
val = readl(rcba_base + 0x3404);
if (!(val & 0x80))
BUG();
else
printk(KERN_DEBUG "Force enabled HPET at resume\n");
return;
}
static void ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
u32 uninitialized_var(rcba);
int err = 0;
if (hpet_address || force_hpet_address)
return;
pci_read_config_dword(dev, 0xF0, &rcba);
rcba &= 0xFFFFC000;
if (rcba == 0) {
printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n");
return;
}
/* use bits 31:14, 16 kB aligned */
rcba_base = ioremap_nocache(rcba, 0x4000);
if (rcba_base == NULL) {
printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n");
return;
}
/* read the Function Disable register, dword mode only */
val = readl(rcba_base + 0x3404);
if (val & 0x80) {
/* HPET is enabled in HPTC. Just not reported by BIOS */
val = val & 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
force_hpet_address);
iounmap(rcba_base);
return;
}
/* HPET disabled in HPTC. Trying to enable */
writel(val | 0x80, rcba_base + 0x3404);
val = readl(rcba_base + 0x3404);
if (!(val & 0x80)) {
err = 1;
} else {
val = val & 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
}
if (err) {
force_hpet_address = 0;
iounmap(rcba_base);
printk(KERN_DEBUG "Failed to force enable HPET\n");
} else {
force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
force_hpet_address);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
ich_force_enable_hpet);
static struct pci_dev *cached_dev;
static void old_ich_force_hpet_resume(void)
{
u32 val;
u32 uninitialized_var(gen_cntl);
if (!force_hpet_address || !cached_dev)
return;
pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
gen_cntl &= (~(0x7 << 15));
gen_cntl |= (0x4 << 15);
pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
val = gen_cntl >> 15;
val &= 0x7;
if (val == 0x4)
printk(KERN_DEBUG "Force enabled HPET at resume\n");
else
BUG();
}
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
u32 uninitialized_var(gen_cntl);
if (hpet_address || force_hpet_address)
return;
pci_read_config_dword(dev, 0xD0, &gen_cntl);
/*
* Bit 17 is HPET enable bit.
* Bit 16:15 control the HPET base address.
*/
val = gen_cntl >> 15;
val &= 0x7;
if (val & 0x4) {
val &= 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
printk(KERN_DEBUG "HPET at base address 0x%lx\n",
force_hpet_address);
return;
}
/*
* HPET is disabled. Trying enabling at FED00000 and check
* whether it sticks
*/
gen_cntl &= (~(0x7 << 15));
gen_cntl |= (0x4 << 15);
pci_write_config_dword(dev, 0xD0, gen_cntl);
pci_read_config_dword(dev, 0xD0, &gen_cntl);
val = gen_cntl >> 15;
val &= 0x7;
if (val & 0x4) {
/* HPET is enabled in HPTC. Just not reported by BIOS */
val &= 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
force_hpet_address);
cached_dev = dev;
force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
return;
}
printk(KERN_DEBUG "Failed to force enable HPET\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
old_ich_force_enable_hpet);
void force_hpet_resume(void)
{
switch (force_hpet_resume_type) {
case ICH_FORCE_HPET_RESUME:
return ich_force_hpet_resume();
case OLD_ICH_FORCE_HPET_RESUME:
return old_ich_force_hpet_resume();
default:
break;
}
}
#endif
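Both quirk paths above decode the HPET base the same way: two select bits pick one of four 4 KiB-spaced windows below 0xFED00000. A small illustrative userspace sketch of that arithmetic:

/* Illustrative sketch of the base decode used by the force-enable quirks. */
#include <stdio.h>

int main(void)
{
	unsigned int val;

	for (val = 0; val < 4; val++)
		printf("HPTC/GEN_CNTL select %u -> HPET at 0x%08lx\n",
		       val, 0xFED00000UL | ((unsigned long)val << 12));
	return 0;
}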

View file

@@ -546,6 +546,37 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
#endif
}
#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
#define CPUID_XFAM_K8 0x00000000
#define CPUID_XFAM_10H 0x00100000
#define CPUID_XFAM_11H 0x00200000
#define CPUID_XMOD 0x000f0000
#define CPUID_XMOD_REV_F 0x00040000
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
u32 lo, hi;
u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
switch (eax & CPUID_XFAM) {
case CPUID_XFAM_K8:
if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
break;
case CPUID_XFAM_10H:
case CPUID_XFAM_11H:
rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
if (lo & ENABLE_C1E_MASK)
return 1;
break;
default:
/* err on the side of caution */
return 1;
}
return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
unsigned level;
@@ -617,6 +648,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/* Family 10 doesn't support C states in MWAIT so don't use it */
if (c->x86 == 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
+if (amd_apic_timer_broken())
+disable_apic_timer = 1;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
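For reference, a hedged userspace sketch of how the CPUID signature in EAX splits into the extended-family/extended-model fields that amd_apic_timer_broken() tests above; the signature value is made up for the example.

#include <stdio.h>

#define CPUID_XFAM       0x0ff00000
#define CPUID_XFAM_K8    0x00000000
#define CPUID_XMOD       0x000f0000
#define CPUID_XMOD_REV_F 0x00040000

int main(void)
{
	unsigned int eax = 0x00040FB2;	/* assumed example K8 signature */

	printf("xfam=0x%08x xmod=0x%08x rev-F or later K8: %s\n",
	       eax & CPUID_XFAM, eax & CPUID_XMOD,
	       (eax & CPUID_XFAM) == CPUID_XFAM_K8 &&
	       (eax & CPUID_XMOD) >= CPUID_XMOD_REV_F ? "yes" : "no");
	return 0;
}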

View file

@@ -223,8 +223,6 @@ void __cpuinit smp_callin(void)
local_irq_disable();
Dprintk("Stack at about %p\n",&cpuid);
-disable_APIC_timer();
/*
* Save our processor parameters
*/
@@ -348,8 +346,6 @@ void __cpuinit start_secondary(void)
enable_8259A_irq(0);
}
-enable_APIC_timer();
/*
* The sibling maps must be set before turing the online map on for
* this cpu

View file

@@ -157,6 +157,9 @@ EXPORT_SYMBOL(profile_pc);
*/
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
+/* Keep nmi watchdog up to date */
+per_cpu(irq_stat, smp_processor_id()).irq0_irqs++;
#ifdef CONFIG_X86_IO_APIC
if (timer_ack) {
/*

View file

@@ -28,11 +28,12 @@
#include <linux/cpu.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
+#include <linux/clockchips.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h> /* for PM timer frequency */
#include <acpi/acpi_bus.h>
#endif
-#include <asm/8253pit.h>
#include <asm/i8253.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
@@ -47,12 +48,8 @@
#include <asm/nmi.h>
#include <asm/vgtod.h>
-static char *timename = NULL;
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-DEFINE_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
@@ -153,45 +150,12 @@ int update_persistent_clock(struct timespec now)
return set_rtc_mmss(now.tv_sec);
}
-void main_timer_handler(void)
+static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
-/*
-* Here we are in the timer irq handler. We have irqs locally disabled (so we
-* don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
-* on the other CPU, so we need a lock. We also need to lock the vsyscall
-* variables, because both do_timer() and us change them -arca+vojtech
-*/
-write_seqlock(&xtime_lock);
-/*
-* Do the timer stuff.
-*/
-do_timer(1);
-#ifndef CONFIG_SMP
-update_process_times(user_mode(get_irq_regs()));
-#endif
-/*
-* In the SMP case we use the local APIC timer interrupt to do the profiling,
-* except when we simulate SMP mode on a uniprocessor system, in that case we
-* have to call the local interrupt handler.
-*/
-if (!using_apic_timer)
-smp_local_timer_interrupt();
-write_sequnlock(&xtime_lock);
-}
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-if (apic_runs_main_timer > 1)
-return IRQ_HANDLED;
-main_timer_handler();
-if (using_apic_timer)
-smp_send_timer_broadcast_ipi();
+add_pda(irq0_irqs, 1);
+global_clock_event->event_handler(global_clock_event);
return IRQ_HANDLED;
}
@@ -292,97 +256,21 @@ static unsigned int __init tsc_calibrate_cpu_khz(void)
return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
-/*
- * pit_calibrate_tsc() uses the speaker output (channel 2) of
- * the PIT. This is better than using the timer interrupt output,
- * because we can read the value of the speaker with just one inb(),
- * where we need three i/o operations for the interrupt channel.
- * We count how many ticks the TSC does in 50 ms.
- */
-static unsigned int __init pit_calibrate_tsc(void)
-{
-unsigned long start, end;
-unsigned long flags;
-spin_lock_irqsave(&i8253_lock, flags);
-outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-outb(0xb0, 0x43);
-outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
-outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
-start = get_cycles_sync();
-while ((inb(0x61) & 0x20) == 0);
-end = get_cycles_sync();
-spin_unlock_irqrestore(&i8253_lock, flags);
-return (end - start) / 50;
-}
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-static void __pit_init(int val, u8 mode)
-{
-unsigned long flags;
-spin_lock_irqsave(&i8253_lock, flags);
-outb_p(mode, PIT_MODE);
-outb_p(val & 0xff, PIT_CH0); /* LSB */
-outb_p(val >> 8, PIT_CH0); /* MSB */
-spin_unlock_irqrestore(&i8253_lock, flags);
-}
-void __init pit_init(void)
-{
-__pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
-}
-void pit_stop_interrupt(void)
-{
-__pit_init(0, 0x30); /* mode 0 */
-}
-void stop_timer_interrupt(void)
-{
-char *name;
-if (hpet_address) {
-name = "HPET";
-hpet_timer_stop_set_go(0);
-} else {
-name = "PIT";
-pit_stop_interrupt();
-}
-printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
-}
static struct irqaction irq0 = {
-.handler = timer_interrupt,
+.handler = timer_event_interrupt,
-.flags = IRQF_DISABLED | IRQF_IRQPOLL,
+.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
.mask = CPU_MASK_NONE,
.name = "timer"
};
void __init time_init(void)
{
-if (nohpet)
-hpet_address = 0;
-if (hpet_arch_init())
-hpet_address = 0;
-if (hpet_use_timer) {
-/* set tick_nsec to use the proper rate for HPET */
-tick_nsec = TICK_NSEC_HPET;
-tsc_khz = hpet_calibrate_tsc();
-timename = "HPET";
-} else {
-pit_init();
-tsc_khz = pit_calibrate_tsc();
-timename = "PIT";
-}
+if (!hpet_enable())
+setup_pit_timer();
+setup_irq(0, &irq0);
+tsc_calibrate();
cpu_khz = tsc_khz;
if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
@@ -398,50 +286,7 @@ void __init time_init(void)
else
vgetcpu_mode = VGETCPU_LSL;
-set_cyc2ns_scale(tsc_khz);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
init_tsc_clocksource();
-setup_irq(0, &irq0);
}
-/*
- * sysfs support for the timer.
- */
-static int timer_suspend(struct sys_device *dev, pm_message_t state)
-{
-return 0;
-}
-static int timer_resume(struct sys_device *dev)
-{
-if (hpet_address)
-hpet_reenable();
-else
-i8254_timer_resume();
-return 0;
-}
-static struct sysdev_class timer_sysclass = {
-.resume = timer_resume,
-.suspend = timer_suspend,
-set_kset_name("timer"),
-};
-/* XXX this sysfs stuff should probably go elsewhere later -john */
-static struct sys_device device_timer = {
-.id = 0,
-.cls = &timer_sysclass,
-};
-static int time_init_device(void)
-{
-int error = sysdev_class_register(&timer_sysclass);
-if (!error)
-error = sysdev_register(&device_timer);
-return error;
-}
-device_initcall(time_init_device);

View file

@@ -6,7 +6,9 @@
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>
+#include <linux/acpi_pmtmr.h>
+#include <asm/hpet.h>
#include <asm/timex.h>
static int notsc __initdata = 0;
@@ -18,7 +20,7 @@ EXPORT_SYMBOL(tsc_khz);
static unsigned int cyc2ns_scale __read_mostly;
-void set_cyc2ns_scale(unsigned long khz)
+static inline void set_cyc2ns_scale(unsigned long khz)
{
cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}
@@ -118,6 +120,95 @@ core_initcall(cpufreq_tsc);
#endif
#define MAX_RETRIES 5
#define SMI_TRESHOLD 50000
/*
* Read TSC and the reference counters. Take care of SMI disturbance
*/
static unsigned long __init tsc_read_refs(unsigned long *pm,
unsigned long *hpet)
{
unsigned long t1, t2;
int i;
for (i = 0; i < MAX_RETRIES; i++) {
t1 = get_cycles_sync();
if (hpet)
*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
else
*pm = acpi_pm_read_early();
t2 = get_cycles_sync();
if ((t2 - t1) < SMI_TRESHOLD)
return t2;
}
return ULONG_MAX;
}
/**
* tsc_calibrate - calibrate the tsc on boot
*/
void __init tsc_calibrate(void)
{
unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
int hpet = is_hpet_enabled();
local_irq_save(flags);
tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
outb(0xb0, 0x43);
outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
tr1 = get_cycles_sync();
while ((inb(0x61) & 0x20) == 0);
tr2 = get_cycles_sync();
tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
local_irq_restore(flags);
/*
* Preset the result with the raw and inaccurate PIT
* calibration value
*/
tsc_khz = (tr2 - tr1) / 50;
/* hpet or pmtimer available ? */
if (!hpet && !pm1 && !pm2) {
printk(KERN_INFO "TSC calibrated against PIT\n");
return;
}
/* Check, whether the sampling was disturbed by an SMI */
if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
printk(KERN_WARNING "TSC calibration disturbed by SMI, "
"using PIT calibration result\n");
return;
}
tsc2 = (tsc2 - tsc1) * 1000000L;
if (hpet) {
printk(KERN_INFO "TSC calibrated against HPET\n");
if (hpet2 < hpet1)
hpet2 += 0x100000000;
hpet2 -= hpet1;
tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000;
} else {
printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
if (pm2 < pm1)
pm2 += ACPI_PM_OVRRUN;
pm2 -= pm1;
tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC;
}
tsc_khz = tsc2 / tsc1;
set_cyc2ns_scale(tsc_khz);
}
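The calibration above reduces to: reference ticks times the reference period give the sampled interval in nanoseconds, and TSC cycles * 10^6 / ns gives kHz. An illustrative userspace sketch with made-up sample values (the period shown assumes a 14.318 MHz HPET):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tsc_delta = 120000000ULL;	/* assumed TSC cycles over ~50 ms */
	uint64_t hpet_delta = 716000ULL;	/* assumed HPET ticks over the same window */
	uint64_t hpet_period_fs = 69841279ULL;	/* femtoseconds per tick at 14.318 MHz */

	uint64_t ref_ns = hpet_delta * hpet_period_fs / 1000000ULL;
	uint64_t tsc_khz = tsc_delta * 1000000ULL / ref_ns;

	printf("reference interval %llu ns -> tsc_khz = %llu\n",
	       (unsigned long long)ref_ns, (unsigned long long)tsc_khz);
	return 0;
}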
/*
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.

View file

@@ -36,6 +36,18 @@ config GENERIC_CMOS_UPDATE
bool
default y
config CLOCKSOURCE_WATCHDOG
bool
default y
config GENERIC_CLOCKEVENTS
bool
default y
config GENERIC_CLOCKEVENTS_BROADCAST
bool
default y
config ZONE_DMA32
bool
default y
@@ -130,6 +142,8 @@ source "init/Kconfig"
menu "Processor type and features"
source "kernel/time/Kconfig"
choice
prompt "Subarchitecture Type"
default X86_PC

View file

@@ -276,21 +276,12 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
unsigned long reason;
reason = pr->power.timer_broadcast_on_state < INT_MAX ?
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
clockevents_notify(reason, &pr->id);
-#else
-cpumask_t mask = cpumask_of_cpu(pr->id);
-if (pr->power.timer_broadcast_on_state < INT_MAX)
-on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-else
-on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
}
/* Power(C) State timer broadcast control */
@@ -298,8 +289,6 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
int state = cx - pr->power.states;
if (state >= pr->power.timer_broadcast_on_state) {
@@ -309,7 +298,6 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
clockevents_notify(reason, &pr->id);
}
-#endif
}
#else

View file

@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
-#include <asm/8253pit.h>
#include <asm/io.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
@@ -28,6 +27,7 @@ MODULE_LICENSE("GPL");
/* Use the global PIT lock ! */
#include <asm/i8253.h>
#else
+#include <asm/8253pit.h>
static DEFINE_SPINLOCK(i8253_lock);
#endif

View file

@ -1,5 +0,0 @@
#ifdef CONFIG_X86_32
# include "8253pit_32.h"
#else
# include "8253pit_64.h"
#endif

View file

@ -1,12 +0,0 @@
/*
* 8253/8254 Programmable Interval Timer
*/
#ifndef _8253PIT_H
#define _8253PIT_H
#include <asm/timex.h>
#define PIT_TICK_RATE CLOCK_TICK_RATE
#endif

View file

@ -1,10 +0,0 @@
/*
* 8253/8254 Programmable Interval Timer
*/
#ifndef _8253PIT_H
#define _8253PIT_H
#define PIT_TICK_RATE 1193182UL
#endif

View file

@@ -19,7 +19,7 @@
extern int apic_verbosity;
extern int apic_runs_main_timer;
extern int ioapic_force;
-extern int apic_mapped;
+extern int disable_apic_timer;
/*
* Define the default level of output to be very little
@@ -79,8 +79,6 @@ extern void smp_local_timer_interrupt (void);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
extern void setup_apic_routing(void);
extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
@@ -95,10 +93,6 @@ extern int apic_is_clustered_box(void);
#define K8_APIC_EXT_INT_MSG_EXT 0x7
#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
extern unsigned boot_cpu_id;

View file

@@ -156,4 +156,54 @@ static inline int is_geode(void)
return (is_geode_gx() || is_geode_lx());
}
/* MFGPTs */
#define MFGPT_MAX_TIMERS 8
#define MFGPT_TIMER_ANY -1
#define MFGPT_DOMAIN_WORKING 1
#define MFGPT_DOMAIN_STANDBY 2
#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
#define MFGPT_CMP1 0
#define MFGPT_CMP2 1
#define MFGPT_EVENT_IRQ 0
#define MFGPT_EVENT_NMI 1
#define MFGPT_EVENT_RESET 3
#define MFGPT_REG_CMP1 0
#define MFGPT_REG_CMP2 2
#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP 6
#define MFGPT_SETUP_CNTEN (1 << 15)
#define MFGPT_SETUP_CMP2 (1 << 14)
#define MFGPT_SETUP_CMP1 (1 << 13)
#define MFGPT_SETUP_SETUP (1 << 12)
#define MFGPT_SETUP_STOPEN (1 << 11)
#define MFGPT_SETUP_EXTEN (1 << 10)
#define MFGPT_SETUP_REVEN (1 << 5)
#define MFGPT_SETUP_CLKSEL (1 << 4)
static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
{
u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
outw(value, base + reg + (timer * 8));
}
static inline u16 geode_mfgpt_read(int timer, u16 reg)
{
u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
return inw(base + reg + (timer * 8));
}
extern int __init geode_mfgpt_detect(void);
extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable);
extern int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner);
#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
#endif
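geode_mfgpt_read()/geode_mfgpt_write() above address each timer through an 8-byte window of I/O ports: register REG of timer N lives at base + N*8 + REG. A hedged userspace sketch of that layout; the 0x6400 base is only an example value, the real one comes from geode_get_dev_base():

#include <stdio.h>

#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP   6

int main(void)
{
	unsigned int base = 0x6400;	/* assumed MSR-provided I/O base */
	int timer;

	for (timer = 0; timer < 8; timer++)
		printf("timer %d: counter @ 0x%04x, setup @ 0x%04x\n", timer,
		       base + timer * 8 + MFGPT_REG_COUNTER,
		       base + timer * 8 + MFGPT_REG_SETUP);
	return 0;
}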

View file

@@ -9,6 +9,7 @@ typedef struct {
unsigned long idle_timestamp;
unsigned int __nmi_count; /* arch dependent */
unsigned int apic_timer_irqs; /* arch dependent */
+unsigned int irq0_irqs;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

View file

@@ -1,5 +1,93 @@
-#ifdef CONFIG_X86_32
-# include "hpet_32.h"
#ifndef ASM_X86_HPET_H
#define ASM_X86_HPET_H
#ifdef CONFIG_HPET_TIMER
/*
* Documentation on HPET can be found at:
* http://www.intel.com/ial/home/sp/pcmmspec.htm
* ftp://download.intel.com/ial/home/sp/mmts098.pdf
*/
#define HPET_MMAP_SIZE 1024
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
#define HPET_T0_CFG 0x100
#define HPET_T0_CMP 0x108
#define HPET_T0_ROUTE 0x110
#define HPET_T1_CFG 0x120
#define HPET_T1_CMP 0x128
#define HPET_T1_ROUTE 0x130
#define HPET_T2_CFG 0x140
#define HPET_T2_CMP 0x148
#define HPET_T2_ROUTE 0x150
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_64BIT 0x00002000
#define HPET_ID_LEGSUP 0x00008000
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
#define HPET_ID_VENDOR_8086 0x8086
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
#define HPET_TN_LEVEL 0x0002
#define HPET_TN_ENABLE 0x0004
#define HPET_TN_PERIODIC 0x0008
#define HPET_TN_PERIODIC_CAP 0x0010
#define HPET_TN_64BIT_CAP 0x0020
#define HPET_TN_SETVAL 0x0040
#define HPET_TN_32BIT 0x0100
#define HPET_TN_ROUTE 0x3e00
#define HPET_TN_FSB 0x4000
#define HPET_TN_FSB_CAP 0x8000
#define HPET_TN_ROUTE_SHIFT 9
/* Max HPET Period is 10^8 femto sec as in HPET spec */
#define HPET_MAX_PERIOD 100000000UL
/*
* Min HPET period is 10^5 femto sec just for safety. If it is less than this,
* then 32 bit HPET counter wrapsaround in less than 0.5 sec.
*/
#define HPET_MIN_PERIOD 100000UL
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern unsigned long hpet_readl(unsigned long a);
extern void force_hpet_resume(void);
#ifdef CONFIG_HPET_EMULATE_RTC
#include <linux/interrupt.h>
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
#endif /* CONFIG_HPET_EMULATE_RTC */
#else
-# include "hpet_64.h"
-#endif
static inline int hpet_enable(void) { return 0; }
static inline unsigned long hpet_readl(unsigned long a) { return 0; }
#endif /* CONFIG_HPET_TIMER */
#endif /* ASM_X86_HPET_H */
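The HPET_MIN_PERIOD/HPET_MAX_PERIOD comments above come down to how fast a 32-bit main counter wraps at a given femtosecond period; a small illustrative calculation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t min_period_fs = 100000ULL;	/* HPET_MIN_PERIOD */
	uint64_t max_period_fs = 100000000ULL;	/* HPET_MAX_PERIOD */
	double min_wrap = (double)(1ULL << 32) * min_period_fs / 1e15;
	double max_wrap = (double)(1ULL << 32) * max_period_fs / 1e15;

	printf("wrap after %.3f s at the minimum period, %.1f s at the maximum\n",
	       min_wrap, max_wrap);
	return 0;
}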

View file

@ -1,90 +0,0 @@
#ifndef _I386_HPET_H
#define _I386_HPET_H
#ifdef CONFIG_HPET_TIMER
/*
* Documentation on HPET can be found at:
* http://www.intel.com/ial/home/sp/pcmmspec.htm
* ftp://download.intel.com/ial/home/sp/mmts098.pdf
*/
#define HPET_MMAP_SIZE 1024
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
#define HPET_T0_CFG 0x100
#define HPET_T0_CMP 0x108
#define HPET_T0_ROUTE 0x110
#define HPET_T1_CFG 0x120
#define HPET_T1_CMP 0x128
#define HPET_T1_ROUTE 0x130
#define HPET_T2_CFG 0x140
#define HPET_T2_CMP 0x148
#define HPET_T2_ROUTE 0x150
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_64BIT 0x00002000
#define HPET_ID_LEGSUP 0x00008000
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
#define HPET_ID_VENDOR_8086 0x8086
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
#define HPET_TN_LEVEL 0x0002
#define HPET_TN_ENABLE 0x0004
#define HPET_TN_PERIODIC 0x0008
#define HPET_TN_PERIODIC_CAP 0x0010
#define HPET_TN_64BIT_CAP 0x0020
#define HPET_TN_SETVAL 0x0040
#define HPET_TN_32BIT 0x0100
#define HPET_TN_ROUTE 0x3e00
#define HPET_TN_FSB 0x4000
#define HPET_TN_FSB_CAP 0x8000
#define HPET_TN_ROUTE_SHIFT 9
/* Max HPET Period is 10^8 femto sec as in HPET spec */
#define HPET_MAX_PERIOD 100000000UL
/*
* Min HPET period is 10^5 femto sec just for safety. If it is less than this,
* then 32 bit HPET counter wrapsaround in less than 0.5 sec.
*/
#define HPET_MIN_PERIOD 100000UL
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
#ifdef CONFIG_HPET_EMULATE_RTC
#include <linux/interrupt.h>
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
#endif /* CONFIG_HPET_EMULATE_RTC */
#else
static inline int hpet_enable(void) { return 0; }
#endif /* CONFIG_HPET_TIMER */
#endif /* _I386_HPET_H */

View file

@ -1,18 +0,0 @@
#ifndef _ASM_X8664_HPET_H
#define _ASM_X8664_HPET_H 1
#include <asm/hpet_32.h>
#define HPET_TICK_RATE (HZ * 100000UL)
extern int hpet_rtc_timer_init(void);
extern int hpet_arch_init(void);
extern int hpet_timer_stop_set_go(unsigned long tick);
extern int hpet_reenable(void);
extern unsigned int hpet_calibrate_tsc(void);
extern int hpet_use_timer;
extern unsigned long hpet_period;
extern unsigned long hpet_tick;
#endif

View file

@@ -1,5 +1,15 @@
-#ifdef CONFIG_X86_32
-# include "i8253_32.h"
-#else
-# include "i8253_64.h"
-#endif
+#ifndef __ASM_I8253_H__
+#define __ASM_I8253_H__
+/* i8253A PIT registers */
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+#define PIT_CH2 0x42
+extern spinlock_t i8253_lock;
+extern struct clock_event_device *global_clock_event;
+extern void setup_pit_timer(void);
+#endif /* __ASM_I8253_H__ */

View file

@ -1,17 +0,0 @@
#ifndef __ASM_I8253_H__
#define __ASM_I8253_H__
#include <linux/clockchips.h>
/* i8253A PIT registers */
#define PIT_MODE 0x43
#define PIT_CH0 0x40
#define PIT_CH2 0x42
extern spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
extern void setup_pit_timer(void);
#endif /* __ASM_I8253_H__ */

View file

@ -1,6 +0,0 @@
#ifndef __ASM_I8253_H__
#define __ASM_I8253_H__
extern spinlock_t i8253_lock;
#endif /* __ASM_I8253_H__ */

View file

@@ -29,6 +29,7 @@ struct x8664_pda {
short isidle;
struct mm_struct *active_mm;
unsigned apic_timer_irqs;
+unsigned irq0_irqs;
} ____cacheline_aligned_in_smp;
extern struct x8664_pda *_cpu_pda[];

View file

@@ -51,9 +51,6 @@ extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
extern void load_gs_index(unsigned gs);
-extern void stop_timer_interrupt(void);
-extern void main_timer_handler(void);
extern unsigned long end_pfn_map;
extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
@@ -90,14 +87,10 @@ extern int timer_over_8254;
extern int gsi_irq_sharing(int gsi);
-extern void smp_local_timer_interrupt(void);
extern int force_mwait;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-void i8254_timer_resume(void);
#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))

View file

@@ -1,5 +1,18 @@
-#ifdef CONFIG_X86_32
-# include "timex_32.h"
+/* x86 architecture timex specifications */
+#ifndef _ASM_X86_TIMEX_H
+#define _ASM_X86_TIMEX_H
+#include <asm/processor.h>
+#include <asm/tsc.h>
+#ifdef CONFIG_X86_ELAN
+# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
#else
-# include "timex_64.h"
+# define PIT_TICK_RATE 1193182 /* Underlying HZ */
+#endif
+#define CLOCK_TICK_RATE PIT_TICK_RATE
+extern int read_current_timer(unsigned long *timer_value);
+#define ARCH_HAS_READ_CURRENT_TIMER 1
#endif

View file

@ -1,22 +0,0 @@
/*
* linux/include/asm-i386/timex.h
*
* i386 architecture timex specifications
*/
#ifndef _ASMi386_TIMEX_H
#define _ASMi386_TIMEX_H
#include <asm/processor.h>
#include <asm/tsc.h>
#ifdef CONFIG_X86_ELAN
# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
#else
# define CLOCK_TICK_RATE 1193182 /* Underlying HZ */
#endif
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
#endif

View file

@ -1,31 +0,0 @@
/*
* linux/include/asm-x86_64/timex.h
*
* x86-64 architecture timex specifications
*/
#ifndef _ASMx8664_TIMEX_H
#define _ASMx8664_TIMEX_H
#include <asm/8253pit.h>
#include <asm/msr.h>
#include <asm/vsyscall.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/tsc.h>
#include <linux/compiler.h>
#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
#define USEC_PER_TICK (USEC_PER_SEC / HZ)
#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
#define NS_SCALE 10 /* 2^10, carefully chosen */
#define US_SCALE 32 /* 2^32, arbitralrily chosen */
extern void mark_tsc_unstable(char *msg);
extern void set_cyc2ns_scale(unsigned long khz);
#endif

View file

@@ -1,13 +1,14 @@
/*
-* linux/include/asm-i386/tsc.h
-*
-* i386 TSC related functions
+* x86 TSC related functions
*/
-#ifndef _ASM_i386_TSC_H
-#define _ASM_i386_TSC_H
+#ifndef _ASM_X86_TSC_H
+#define _ASM_X86_TSC_H
#include <asm/processor.h>
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitralrily chosen */
/*
* Standard way to access the cycle counter.
*/
@@ -72,4 +73,8 @@ int check_tsc_unstable(void);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
+#ifdef CONFIG_X86_64
+extern void tsc_calibrate(void);
+#endif
#endif
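NS_SCALE is the shift used by set_cyc2ns_scale() in tsc_64.c: scale = (10^6 << NS_SCALE) / tsc_khz, after which cycles convert to nanoseconds with one multiply and a shift. An illustrative userspace sketch with an assumed 2.4 GHz TSC:

#include <stdio.h>
#include <stdint.h>

#define NS_SCALE 10	/* 2^10, carefully chosen */

int main(void)
{
	uint64_t tsc_khz = 2400000;	/* assumed 2.4 GHz TSC */
	uint64_t scale = (1000000ULL << NS_SCALE) / tsc_khz;
	uint64_t cycles = 2400000;	/* roughly one millisecond of cycles */

	printf("cyc2ns_scale=%llu -> %llu cycles = %llu ns\n",
	       (unsigned long long)scale, (unsigned long long)cycles,
	       (unsigned long long)((cycles * scale) >> NS_SCALE));
	return 0;
}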

View file

@@ -29,9 +29,6 @@ enum vsyscall_num {
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
-#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
-#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
extern int __vgetcpu_mode;
extern volatile unsigned long __jiffies;

View file

@@ -8,7 +8,7 @@
#ifndef _LINUX_CLOCKCHIPS_H
#define _LINUX_CLOCKCHIPS_H
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
#include <linux/clocksource.h>
#include <linux/cpumask.h>
@@ -126,11 +126,14 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
extern int clockevents_program_event(struct clock_event_device *dev,
ktime_t expires, ktime_t now);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg);
#else
+# define clockevents_notify(reason, arg) do { } while (0)
+#endif
+#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
+static inline void clockevents_resume_events(void) { }
#define clockevents_notify(reason, arg) do { } while (0)
#endif

View file

@@ -36,8 +36,6 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-#define LATCH_HPET ((HPET_TICK_RATE + HZ/2) / HZ)
/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can
* improve accuracy by shifting LSH bits, hence calculating:
* (NOM << LSH) / DEN
@@ -53,13 +51,9 @@
/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
-#define ACTHZ_HPET (SH_DIV (HPET_TICK_RATE, LATCH_HPET, 8))
/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
-#define TICK_NSEC_HPET (SH_DIV(1000000UL * 1000, ACTHZ_HPET, 8))
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
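The comment above describes the scaled-division trick behind SH_DIV(). The sketch below redoes the LATCH/ACTHZ/TICK_NSEC arithmetic for HZ=250 and the i386 CLOCK_TICK_RATE; sh_div() is a local stand-in for the kernel macro, not its exact definition:

#include <stdio.h>
#include <stdint.h>

/* (nom << lsh) / den with rounding, keeping lsh extra bits of precision */
static uint64_t sh_div(uint64_t nom, uint64_t den, int lsh)
{
	return ((nom / den) << lsh) + (((nom % den) << lsh) + den / 2) / den;
}

int main(void)
{
	const uint64_t tick_rate = 1193182, hz = 250;
	uint64_t latch = (tick_rate + hz / 2) / hz;	/* PIT divisor */
	uint64_t acthz = sh_div(tick_rate, latch, 8);	/* real HZ << 8 */
	uint64_t tick_nsec = sh_div(1000000ULL * 1000, acthz, 8);

	printf("LATCH=%llu ACTHZ=%llu (%.4f Hz) TICK_NSEC=%llu\n",
	       (unsigned long long)latch, (unsigned long long)acthz,
	       acthz / 256.0, (unsigned long long)tick_nsec);
	return 0;
}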

View file

@@ -2242,6 +2242,7 @@
#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5
#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
+#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2

View file

@@ -23,3 +23,8 @@ config HIGH_RES_TIMERS
hardware is not capable then this option only increases
the size of the kernel image.
config GENERIC_CLOCKEVENTS_BUILD
bool
default y
depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR

View file

@@ -1,6 +1,6 @@
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o

View file

@@ -194,6 +194,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
local_irq_restore(flags);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
* clockevents_notify - notification about relevant events
*/
@@ -222,4 +223,4 @@ void clockevents_notify(unsigned long reason, void *arg)
spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
+#endif

View file

@@ -64,8 +64,9 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
-if (tick_broadcast_device.evtdev ||
-(dev->features & CLOCK_EVT_FEAT_C3STOP))
+if ((tick_broadcast_device.evtdev &&
+tick_broadcast_device.evtdev->rating >= dev->rating) ||
+(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
clockevents_exchange_device(NULL, dev);
@@ -176,8 +177,6 @@ static void tick_do_periodic_broadcast(void)
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
-dev->next_event.tv64 = KTIME_MAX;
tick_do_periodic_broadcast();
/*
@@ -515,11 +514,9 @@ static void tick_broadcast_clear_oneshot(int cpu)
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
-if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
-bc->event_handler = tick_handle_oneshot_broadcast;
-clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-bc->next_event.tv64 = KTIME_MAX;
-}
+bc->event_handler = tick_handle_oneshot_broadcast;
+clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+bc->next_event.tv64 = KTIME_MAX;
}
/*

View file

@@ -200,7 +200,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
cpu = smp_processor_id();
if (!cpu_isset(cpu, newdev->cpumask))
-goto out;
+goto out_bc;
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
@@ -265,7 +265,7 @@ out_bc:
*/
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
-out:
spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;