Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/apic.c      |  39
-rw-r--r--  arch/i386/kernel/cpu/amd.c   |  34
-rw-r--r--  arch/i386/kernel/hpet.c      |  70
-rw-r--r--  arch/i386/kernel/i8253.c     |  10
-rw-r--r--  arch/i386/kernel/io_apic.c   |   2
-rw-r--r--  arch/i386/kernel/microcode.c |  71
-rw-r--r--  arch/i386/kernel/nmi.c       | 129
-rw-r--r--  arch/i386/kernel/tsc.c       |   9
-rw-r--r--  arch/i386/kernel/vmi.c       |  23
9 files changed, 344 insertions, 43 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5cff7970911e..93aa911646ad 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -28,6 +28,7 @@
 #include <linux/clockchips.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/module.h>
+#include <linux/dmi.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -61,6 +62,11 @@ static int enable_local_apic __initdata = 0;
 
 /* Local APIC timer verification ok */
 static int local_apic_timer_verify_ok;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
+/* Local APIC timer works in C2 */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
 /*
  * Debug level, exported for io_apic.c
@@ -340,6 +346,22 @@ void __init setup_boot_APIC_clock(void)
 	long delta, deltapm;
 	int pm_referenced = 0;
 
+	if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+		local_apic_timer_disabled = 1;
+
+	/*
+	 * The local apic timer can be disabled via the kernel
+	 * commandline or from the test above. Register the lapic
+	 * timer as a dummy clock event source on SMP systems, so the
+	 * broadcast mechanism is used. On UP systems simply ignore it.
+	 */
+	if (local_apic_timer_disabled) {
+		/* No broadcast on UP ! */
+		if (num_possible_cpus() > 1)
+			setup_APIC_timer();
+		return;
+	}
+
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
 
@@ -461,7 +483,8 @@ void __init setup_boot_APIC_clock(void)
 			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
 		else
 			local_apic_timer_verify_ok = 0;
-	}
+	} else
+		local_irq_enable();
 
 	if (!local_apic_timer_verify_ok) {
 		printk(KERN_WARNING
@@ -1179,6 +1202,20 @@ static int __init parse_nolapic(char *arg)
 }
 early_param("nolapic", parse_nolapic);
 
+static int __init parse_disable_lapic_timer(char *arg)
+{
+	local_apic_timer_disabled = 1;
+	return 0;
+}
+early_param("nolapic_timer", parse_disable_lapic_timer);
+
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+	local_apic_timer_c2_ok = 1;
+	return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
 static int __init apic_set_verbosity(char *str)
 {
 	if (strcmp("debug", str) == 0)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 41cfea57232b..2d47db482972 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -22,6 +22,37 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
+#define ENABLE_C1E_MASK		0x18000000
+#define CPUID_PROCESSOR_SIGNATURE	1
+#define CPUID_XFAM		0x0ff00000
+#define CPUID_XFAM_K8		0x00000000
+#define CPUID_XFAM_10H		0x00100000
+#define CPUID_XFAM_11H		0x00200000
+#define CPUID_XMOD		0x000f0000
+#define CPUID_XMOD_REV_F	0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+	u32 lo, hi;
+	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	switch (eax & CPUID_XFAM) {
+	case CPUID_XFAM_K8:
+		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+			break;
+	case CPUID_XFAM_10H:
+	case CPUID_XFAM_11H:
+		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+		if (lo & ENABLE_C1E_MASK)
+			return 1;
+		break;
+	default:
+		/* err on the side of caution */
+		return 1;
+	}
+	return 0;
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	if (cpuid_eax(0x80000000) >= 0x80000006)
 		num_cache_leaves = 3;
+
+	if (amd_apic_timer_broken())
+		set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index f3ab61ee7498..17d73459fc5f 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -3,6 +3,8 @@
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
 
 #include <asm/hpet.h>
 #include <asm/io.h>
@@ -197,7 +199,7 @@ static int hpet_next_event(unsigned long delta,
 	cnt += delta;
 	hpet_writel(cnt, HPET_T0_CMP);
 
-	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
 }
 
 /*
@@ -307,6 +309,7 @@ int __init hpet_enable(void)
 out_nohpet:
 	iounmap(hpet_virt_address);
 	hpet_virt_address = NULL;
+	boot_hpet_disable = 1;
 	return 0;
 }
 
@@ -521,3 +524,68 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 #endif
+
+
+/*
+ * Suspend/resume part
+ */
+
+#ifdef CONFIG_PM
+
+static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
+
+	cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
+	hpet_writel(cfg, HPET_CFG);
+
+	return 0;
+}
+
+static int hpet_resume(struct sys_device *sys_device)
+{
+	unsigned int id;
+
+	hpet_start_counter();
+
+	id = hpet_readl(HPET_ID);
+
+	if (id & HPET_ID_LEGSUP)
+		hpet_enable_int();
+
+	return 0;
+}
+
+static struct sysdev_class hpet_class = {
+	set_kset_name("hpet"),
+	.suspend	= hpet_suspend,
+	.resume		= hpet_resume,
+};
+
+static struct sys_device hpet_device = {
+	.id		= 0,
+	.cls		= &hpet_class,
+};
+
+
+static __init int hpet_register_sysfs(void)
+{
+	int err;
+
+	if (!is_hpet_capable())
+		return 0;
+
+	err = sysdev_class_register(&hpet_class);
+
+	if (!err) {
+		err = sysdev_register(&hpet_device);
+		if (err)
+			sysdev_class_unregister(&hpet_class);
+	}
+
+	return err;
+}
+
+device_initcall(hpet_register_sysfs);
+
+#endif
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 5cbb776b3089..10cef5ca8a5b 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -47,9 +47,17 @@ static void init_pit_timer(enum clock_event_mode mode,
 		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
 		break;
 
-	case CLOCK_EVT_MODE_ONESHOT:
+	/*
+	 * Avoid unnecessary state transitions, as it confuses
+	 * Geode / Cyrix based boxen.
+	 */
 	case CLOCK_EVT_MODE_SHUTDOWN:
+		if (evt->mode == CLOCK_EVT_MODE_UNUSED)
+			break;
 	case CLOCK_EVT_MODE_UNUSED:
+		if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN)
+			break;
+	case CLOCK_EVT_MODE_ONESHOT:
 		/* One shot setup */
 		outb_p(0x38, PIT_MODE);
 		udelay(10);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index e4408ff4e674..b3ab8ffebd27 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -736,7 +736,7 @@ failed:
 	return 0;
 }
 
-int __init irqbalance_disable(char *str)
+int __devinit irqbalance_disable(char *str)
 {
 	irqbalance_disabled = 1;
 	return 1;
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index b8f16633a6ec..cbe7ec8dbb9f 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -567,6 +567,53 @@ static int cpu_request_microcode(int cpu)
 	return error;
 }
 
+static int apply_microcode_on_cpu(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	cpumask_t old;
+	unsigned int val[2];
+	int err = 0;
+
+	if (!uci->mc)
+		return -EINVAL;
+
+	old = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	/* Check if the microcode we have in memory matches the CPU */
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64) || uci->sig != cpuid_eax(0x00000001))
+		err = -EINVAL;
+
+	if (!err && ((c->x86_model >= 5) || (c->x86 > 6))) {
+		/* get processor flags from MSR 0x17 */
+		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+		if (uci->pf != (1 << ((val[1] >> 18) & 7)))
+			err = -EINVAL;
+	}
+
+	if (!err) {
+		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+		/* see notes above for revision 1.07. Apparent chip bug */
+		sync_core();
+		/* get the current revision from MSR 0x8B */
+		rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+		if (uci->rev != val[1])
+			err = -EINVAL;
+	}
+
+	if (!err)
+		apply_microcode(cpu);
+	else
+		printk(KERN_ERR "microcode: Could not apply microcode to CPU%d:"
+			" sig=0x%x, pf=0x%x, rev=0x%x\n",
+			cpu, uci->sig, uci->pf, uci->rev);
+
+	set_cpus_allowed(current, old);
+	return err;
+}
+
 static void microcode_init_cpu(int cpu)
 {
 	cpumask_t old;
@@ -577,7 +624,8 @@ static void microcode_init_cpu(int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
-	if (uci->valid && system_state == SYSTEM_RUNNING)
+	if (uci->valid && system_state == SYSTEM_RUNNING &&
+	    !suspend_cpu_hotplug)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
 	set_cpus_allowed(current, old);
@@ -663,13 +711,24 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return 0;
 
 	pr_debug("Microcode:CPU %d added\n", cpu);
-	memset(uci, 0, sizeof(*uci));
+	/* If suspend_cpu_hotplug is set, the system is resuming and we should
+	 * use the data from before the suspend.
+	 */
+	if (suspend_cpu_hotplug) {
+		err = apply_microcode_on_cpu(cpu);
+		if (err)
+			microcode_fini_cpu(cpu);
+	}
+	if (!uci->valid)
+		memset(uci, 0, sizeof(*uci));
 
 	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
 	if (err)
 		return err;
 
-	microcode_init_cpu(cpu);
+	if (!uci->valid)
+		microcode_init_cpu(cpu);
+
 	return 0;
 }
 
@@ -680,7 +739,11 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
 	if (!cpu_online(cpu))
 		return 0;
 	pr_debug("Microcode:CPU %d removed\n", cpu);
-	microcode_fini_cpu(cpu);
+	/* If suspend_cpu_hotplug is set, the system is suspending and we should
+	 * keep the microcode in memory for the resume.
+	 */
+	if (!suspend_cpu_hotplug)
+		microcode_fini_cpu(cpu);
 	sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b104..a98ba88a8c0c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -263,7 +328,7 @@ static int __init check_nmi_watchdog(void)
 	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
-	mdelay((10*1000)/nmi_hz); // wait 10 ticks
+	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE	(1 << 22)
@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
fail:
 	return 0;
 }
@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 0e65f7ab63a2..6cb8f5336732 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -18,6 +18,8 @@
 
 #include "mach_timer.h"
 
+static int tsc_enabled;
+
 /*
  * On some systems the TSC frequency does not
  * change with the cpu frequency. So we need
@@ -105,7 +107,7 @@ unsigned long long sched_clock(void)
 	/*
 	 * Fall back to jiffies if there's no TSC available:
 	 */
-	if (tsc_unstable || unlikely(tsc_disable))
+	if (unlikely(!tsc_enabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -283,6 +285,7 @@ void mark_tsc_unstable(void)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
+		tsc_enabled = 0;
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
 			clocksource_change_rating(&clocksource_tsc, 0);
@@ -383,7 +386,9 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	}
+	} else
+		tsc_enabled = 1;
+
 	clocksource_register(&clocksource_tsc);
 
 	return;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index fb07a1aad225..edc339fa5038 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -69,6 +69,7 @@ struct {
 	void (*flush_tlb)(int);
 	void (*set_initial_ap_state)(int, int);
 	void (*halt)(void);
+	void (*set_lazy_mode)(int mode);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -574,6 +575,26 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
+static void vmi_set_lazy_mode(int mode)
+{
+	static DEFINE_PER_CPU(int, lazy_mode);
+
+	if (!vmi_ops.set_lazy_mode)
+		return;
+
+	/* Modes should never nest or overlap */
+	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
+					     mode == PARAVIRT_LAZY_FLUSH));
+
+	if (mode == PARAVIRT_LAZY_FLUSH) {
+		vmi_ops.set_lazy_mode(0);
+		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
+	} else {
+		vmi_ops.set_lazy_mode(mode);
+		__get_cpu_var(lazy_mode) = mode;
+	}
+}
+
 static inline int __init check_vmi_rom(struct vrom_header *rom)
 {
 	struct pci_header *pci;
@@ -804,7 +825,7 @@ static inline int __init activate_vmi(void)
 	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(set_iopl_mask, SetIOPLMask);
 	para_fill(io_delay, IODelay);
-	para_fill(set_lazy_mode, SetLazyMode);
+	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
 	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);