author    Stephen Rothwell <sfr@canb.auug.org.au>  2011-04-04 10:56:56 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2011-04-04 10:56:56 +1000
commit    905d5ddb789c2171468fe8c7efe0d85f2d2160a2 (patch)
tree      1359a5db587b93d78ad0949dbe18cc91afdaa12d /arch/x86/kernel
parent    caed937617784e416522d37a062c2098d6df2216 (diff)
parent    42be29a3d2b7c07ca0c97150aa3598b964f5a8ee (diff)
Merge remote-tracking branch 'idle-test/idle-test'
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/cstate.c |  24
-rw-r--r--  arch/x86/kernel/apm_32.c      | 232
-rw-r--r--  arch/x86/kernel/cpu/bugs.c    |  13
-rw-r--r--  arch/x86/kernel/cpu/common.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/proc.c    |   2
-rw-r--r--  arch/x86/kernel/process.c     | 167
-rw-r--r--  arch/x86/kernel/process_32.c  |   4
-rw-r--r--  arch/x86/kernel/process_64.c  |   4
-rw-r--r--  arch/x86/kernel/setup.c       |   4
-rw-r--r--  arch/x86/kernel/smpboot.c     |   2
10 files changed, 71 insertions(+), 383 deletions(-)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 5812404a0d4c..c49281e8e9a2 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -149,6 +149,30 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+ if (!need_resched()) {
+ if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
+ clflush((void *)&current_thread_info()->flags);
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(ax, cx);
+ }
+}
+
+
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
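
For context, a sketch of how the helper added above is consumed: acpi_processor_ffh_cstate_enter() (whose opening line is the context shown here) looks up the per-cpu eax/ecx MWAIT hints recorded at probe time and passes them down. Variable and field names below are assumed from the 2.6.3x-era ACPI code, not part of this hunk:

/* Sketch (assumed-era layout): cpu_cstate_entry is a per-cpu array filled
 * by acpi_processor_ffh_cstate_probe() with one eax/ecx hint pair per
 * C-state index; entry via MWAIT then needs no further BIOS involvement. */
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
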
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0b4be431c620..f340c0f7e8ff 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -175,8 +175,6 @@
* Ignore first resume after we generate our own resume event
* after a suspend (Thomas Hood)
* Daemonize now gets rid of our controlling terminal (sfr).
- * CONFIG_APM_CPU_IDLE now just affects the default value of
- * idle_threshold (sfr).
* Change name of kernel apm daemon (as it no longer idles) (sfr).
* 1.16ac: Fix up SMP support somewhat. You can now force SMP on and we
* make _all_ APM calls on the CPU#0. Fix unsafe sign bug.
@@ -262,12 +260,6 @@ extern int (*console_blank_hook)(int);
* [no-]smp Use apm even on an SMP box
* bounce[-_]interval=<n> number of ticks to ignore suspend
* bounces
- * idle[-_]threshold=<n> System idle percentage above which to
- * make APM BIOS idle calls. Set it to
- * 100 to disable.
- * idle[-_]period=<n> Period (in 1/100s of a second) over
- * which the idle percentage is
- * calculated.
*/
/* KNOWN PROBLEM MACHINES:
@@ -357,26 +349,12 @@ struct apm_user {
#define APM_BIOS_MAGIC 0x4101
/*
- * idle percentage above which bios idle calls are done
- */
-#ifdef CONFIG_APM_CPU_IDLE
-#define DEFAULT_IDLE_THRESHOLD 95
-#else
-#define DEFAULT_IDLE_THRESHOLD 100
-#endif
-#define DEFAULT_IDLE_PERIOD (100 / 3)
-
-/*
* Local variables
*/
static struct {
unsigned long offset;
unsigned short segment;
} apm_bios_entry;
-static int clock_slowed;
-static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
-static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
@@ -806,165 +784,6 @@ static int set_system_power_state(u_short state)
}
/**
- * apm_do_idle - perform power saving
- *
- * This function notifies the BIOS that the processor is (in the view
- * of the OS) idle. It returns -1 in the event that the BIOS refuses
- * to handle the idle request. On a success the function returns 1
- * if the BIOS did clock slowing or 0 otherwise.
- */
-
-static int apm_do_idle(void)
-{
- u32 eax;
- u8 ret = 0;
- int idled = 0;
- int polling;
- int err = 0;
-
- polling = !!(current_thread_info()->status & TS_POLLING);
- if (polling) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- }
- if (!need_resched()) {
- idled = 1;
- ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
- }
- if (polling)
- current_thread_info()->status |= TS_POLLING;
-
- if (!idled)
- return 0;
-
- if (ret) {
- static unsigned long t;
-
- /* This always fails on some SMP boards running UP kernels.
- * Only report the failure the first 5 times.
- */
- if (++t < 5) {
- printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
- t = jiffies;
- }
- return -1;
- }
- clock_slowed = (apm_info.bios.flags & APM_IDLE_SLOWS_CLOCK) != 0;
- return clock_slowed;
-}
-
-/**
- * apm_do_busy - inform the BIOS the CPU is busy
- *
- * Request that the BIOS brings the CPU back to full performance.
- */
-
-static void apm_do_busy(void)
-{
- u32 dummy;
- int err;
-
- if (clock_slowed || ALWAYS_CALL_BUSY) {
- (void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
- clock_slowed = 0;
- }
-}
-
-/*
- * If no process has really been interested in
- * the CPU for some time, we want to call BIOS
- * power management - we probably want
- * to conserve power.
- */
-#define IDLE_CALC_LIMIT (HZ * 100)
-#define IDLE_LEAKY_MAX 16
-
-static void (*original_pm_idle)(void) __read_mostly;
-
-/**
- * apm_cpu_idle - cpu idling for APM capable Linux
- *
- * This is the idling function the kernel executes when APM is available. It
- * tries to do BIOS powermanagement based on the average system idle time.
- * Furthermore it calls the system default idle routine.
- */
-
-static void apm_cpu_idle(void)
-{
- static int use_apm_idle; /* = 0 */
- static unsigned int last_jiffies; /* = 0 */
- static unsigned int last_stime; /* = 0 */
-
- int apm_idle_done = 0;
- unsigned int jiffies_since_last_check = jiffies - last_jiffies;
- unsigned int bucket;
-
-recalc:
- if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
- use_apm_idle = 0;
- last_jiffies = jiffies;
- last_stime = current->stime;
- } else if (jiffies_since_last_check > idle_period) {
- unsigned int idle_percentage;
-
- idle_percentage = current->stime - last_stime;
- idle_percentage *= 100;
- idle_percentage /= jiffies_since_last_check;
- use_apm_idle = (idle_percentage > idle_threshold);
- if (apm_info.forbid_idle)
- use_apm_idle = 0;
- last_jiffies = jiffies;
- last_stime = current->stime;
- }
-
- bucket = IDLE_LEAKY_MAX;
-
- while (!need_resched()) {
- if (use_apm_idle) {
- unsigned int t;
-
- t = jiffies;
- switch (apm_do_idle()) {
- case 0:
- apm_idle_done = 1;
- if (t != jiffies) {
- if (bucket) {
- bucket = IDLE_LEAKY_MAX;
- continue;
- }
- } else if (bucket) {
- bucket--;
- continue;
- }
- break;
- case 1:
- apm_idle_done = 1;
- break;
- default: /* BIOS refused */
- break;
- }
- }
- if (original_pm_idle)
- original_pm_idle();
- else
- default_idle();
- local_irq_disable();
- jiffies_since_last_check = jiffies - last_jiffies;
- if (jiffies_since_last_check > idle_period)
- goto recalc;
- }
-
- if (apm_idle_done)
- apm_do_busy();
-
- local_irq_enable();
-}
-
-/**
* apm_power_off - ask the BIOS to power off
*
* Handle the power off sequence. This is the one piece of code we
@@ -1872,12 +1691,6 @@ static int __init apm_setup(char *str)
if ((strncmp(str, "bounce-interval=", 16) == 0) ||
(strncmp(str, "bounce_interval=", 16) == 0))
bounce_interval = simple_strtol(str + 16, NULL, 0);
- if ((strncmp(str, "idle-threshold=", 15) == 0) ||
- (strncmp(str, "idle_threshold=", 15) == 0))
- idle_threshold = simple_strtol(str + 15, NULL, 0);
- if ((strncmp(str, "idle-period=", 12) == 0) ||
- (strncmp(str, "idle_period=", 12) == 0))
- idle_period = simple_strtol(str + 12, NULL, 0);
invert = (strncmp(str, "no-", 3) == 0) ||
(strncmp(str, "no_", 3) == 0);
if (invert)
@@ -1889,7 +1702,6 @@ static int __init apm_setup(char *str)
power_off = !invert;
if (strncmp(str, "smp", 3) == 0) {
smp = !invert;
- idle_threshold = 100;
}
if ((strncmp(str, "allow-ints", 10) == 0) ||
(strncmp(str, "allow_ints", 10) == 0))
@@ -1990,17 +1802,6 @@ static int __init apm_is_horked_d850md(const struct dmi_system_id *d)
return 0;
}
-/* Some APM bioses hang on APM idle calls */
-static int __init apm_likes_to_melt(const struct dmi_system_id *d)
-{
- if (apm_info.forbid_idle == 0) {
- apm_info.forbid_idle = 1;
- printk(KERN_INFO "%s machine detected. "
- "Disabling APM idle calls.\n", d->ident);
- }
- return 0;
-}
-
/*
* Check for clue free BIOS implementations who use
* the following QA technique
@@ -2141,16 +1942,6 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
},
- { /* APM idle hangs */
- apm_likes_to_melt, "Jabil AMD",
- { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), },
- },
- { /* APM idle hangs */
- apm_likes_to_melt, "AMI Bios",
- { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), },
- },
{ /* Handle problems with APM on Sony Vaio PCG-N505X(DE) */
swab_apm_power_in_minutes, "Sony VAIO",
{ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
@@ -2380,14 +2171,6 @@ static int __init apm_init(void)
if (misc_register(&apm_device))
printk(KERN_WARNING "apm: Could not register misc device.\n");
- if (HZ != 100)
- idle_period = (idle_period * HZ) / 100;
- if (idle_threshold < 100) {
- original_pm_idle = pm_idle;
- pm_idle = apm_cpu_idle;
- set_pm_idle = 1;
- }
-
return 0;
}
@@ -2395,15 +2178,6 @@ static void __exit apm_exit(void)
{
int error;
- if (set_pm_idle) {
- pm_idle = original_pm_idle;
- /*
- * We are about to unload the current idle thread pm callback
- * (pm_idle), Wait for all processors to update cached/local
- * copies of pm_idle before proceeding.
- */
- cpu_idle_wait();
- }
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) {
error = apm_engage_power_management(APM_DEVICE_ALL, 0);
@@ -2440,12 +2214,6 @@ MODULE_PARM_DESC(broken_psr, "BIOS has a broken GetPowerStatus call");
module_param(realmode_power_off, bool, 0444);
MODULE_PARM_DESC(realmode_power_off,
"Switch to real mode before powering off");
-module_param(idle_threshold, int, 0444);
-MODULE_PARM_DESC(idle_threshold,
- "System idle percentage above which to make APM BIOS idle calls");
-module_param(idle_period, int, 0444);
-MODULE_PARM_DESC(idle_period,
- "Period (in sec/100) over which to caculate the idle percentage");
module_param(smp, bool, 0444);
MODULE_PARM_DESC(smp,
"Set this to enable APM use on an SMP platform. Use with caution on older systems");
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c39576cb3018..b5e291014fc2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,14 +17,6 @@
#include <asm/paravirt.h>
#include <asm/alternative.h>
-static int __init no_halt(char *s)
-{
- boot_cpu_data.hlt_works_ok = 0;
- return 1;
-}
-
-__setup("no-hlt", no_halt);
-
static int __init no_387(char *s)
{
boot_cpu_data.hard_math = 0;
@@ -90,10 +82,7 @@ static void __init check_hlt(void)
return;
printk(KERN_INFO "Checking 'hlt' instruction... ");
- if (!boot_cpu_data.hlt_works_ok) {
- printk("disabled\n");
- return;
- }
+
halt();
halt();
halt();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2ced0074a45..f1cf79032d40 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -887,7 +887,7 @@ static void vgetcpu_set_mode(void)
void __init identify_boot_cpu(void)
{
identify_cpu(&boot_cpu_data);
- init_c1e_mask();
+ init_amd_e400_mask();
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 62ac8cb6ba27..203feb779def 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -33,7 +33,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
seq_printf(m,
"fdiv_bug\t: %s\n"
- "hlt_bug\t\t: %s\n"
"f00f_bug\t: %s\n"
"coma_bug\t: %s\n"
"fpu\t\t: %s\n"
@@ -41,7 +40,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
"cpuid level\t: %d\n"
"wp\t\t: %s\n",
c->fdiv_bug ? "yes" : "no",
- c->hlt_works_ok ? "no" : "yes",
c->f00f_bug ? "yes" : "no",
c->coma_bug ? "yes" : "no",
c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d46cbe46b7ab..1db82b0b6060 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,69 +337,30 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static inline int hlt_use_halt(void)
-{
- return (!hlt_counter && boot_cpu_data.hlt_works_ok);
-}
-#else
-static inline int hlt_use_halt(void)
-{
- return 1;
-}
-#endif
/*
* We use this if we don't have any better
* idle routine..
*/
-void default_idle(void)
+static void default_idle(void)
{
- if (hlt_use_halt()) {
- trace_power_start(POWER_CSTATE, 1, smp_processor_id());
- trace_cpu_idle(1, smp_processor_id());
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
+ trace_power_start(POWER_CSTATE, 1, smp_processor_id());
+ trace_cpu_idle(1, smp_processor_id());
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
- if (!need_resched())
- safe_halt(); /* enables interrupts racelessly */
- else
- local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
- trace_power_end(smp_processor_id());
- trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
- } else {
+ if (!need_resched())
+ safe_halt(); /* enables interrupts racelessly */
+ else
local_irq_enable();
- /* loop is done by the caller */
- cpu_relax();
- }
+ current_thread_info()->status |= TS_POLLING;
+ trace_power_end(smp_processor_id());
+ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
void stop_this_cpu(void *dummy)
{
@@ -411,8 +372,7 @@ void stop_this_cpu(void *dummy)
disable_local_APIC();
for (;;) {
- if (hlt_works(smp_processor_id()))
- halt();
+ halt();
}
}
@@ -437,50 +397,6 @@ void cpu_idle_wait(void)
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- if (!need_resched()) {
- trace_power_start(POWER_CSTATE, 1, smp_processor_id());
- trace_cpu_idle(1, smp_processor_id());
- if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(0, 0);
- else
- local_irq_enable();
- trace_power_end(smp_processor_id());
- trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
- } else
- local_irq_enable();
-}
-
-/*
* On SMP it's slightly faster (but much more power-consuming!)
* to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
@@ -517,9 +433,6 @@ int mwait_usable(const struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- if (boot_option_idle_override == IDLE_FORCE_MWAIT)
- return 1;
-
if (c->cpuid_level < MWAIT_INFO)
return 0;
@@ -535,45 +448,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
return (edx & MWAIT_EDX_C1);
}
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_detected;
+EXPORT_SYMBOL(amd_e400_detected);
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_mask;
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
{
- if (c1e_mask != NULL)
- cpumask_clear_cpu(cpu, c1e_mask);
+ if (amd_e400_mask != NULL)
+ cpumask_clear_cpu(cpu, amd_e400_mask);
}
/*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
* pending message MSR. If we detect C1E, then we handle it the same
* way as C3 power states (local apic timer and TSC stop)
*/
-static void c1e_idle(void)
+static void amd_e400_idle(void)
{
if (need_resched())
return;
- if (!c1e_detected) {
+ if (!amd_e400_detected) {
u32 lo, hi;
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (lo & K8_INTP_C1E_ACTIVE_MASK) {
- c1e_detected = true;
+ amd_e400_detected = true;
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
printk(KERN_INFO "System has AMD C1E enabled\n");
}
}
- if (c1e_detected) {
+ if (amd_e400_detected) {
int cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, c1e_mask)) {
- cpumask_set_cpu(cpu, c1e_mask);
+ if (!cpumask_test_cpu(cpu, amd_e400_mask)) {
+ cpumask_set_cpu(cpu, amd_e400_mask);
/*
* Force broadcast so ACPI can not interfere.
*/
@@ -608,25 +521,19 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
if (pm_idle)
return;
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * One CPU supports mwait => All CPUs supports mwait
- */
- printk(KERN_INFO "using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- } else if (cpu_has_amd_erratum(amd_erratum_400)) {
+ if (cpu_has_amd_erratum(amd_erratum_400)) {
/* E400: APIC timer interrupt does not wake up CPU from C1e */
- printk(KERN_INFO "using C1E aware idle routine\n");
- pm_idle = c1e_idle;
+ printk(KERN_INFO "using E400 aware idle routine\n");
+ pm_idle = amd_e400_idle;
} else
pm_idle = default_idle;
}
-void __init init_c1e_mask(void)
+void __init init_amd_e400_mask(void)
{
- /* If we're using c1e_idle, we need to allocate c1e_mask. */
- if (pm_idle == c1e_idle)
- zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+ /* If we're using amd_e400_idle, we need to allocate amd_e400_mask. */
+ if (pm_idle == amd_e400_idle)
+ zalloc_cpumask_var(&amd_e400_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
@@ -638,8 +545,6 @@ static int __init idle_setup(char *str)
printk("using polling idle threads.\n");
pm_idle = poll_idle;
boot_option_idle_override = IDLE_POLL;
- } else if (!strcmp(str, "mwait")) {
- boot_option_idle_override = IDLE_FORCE_MWAIT;
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
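
Both default_idle() above and the removed apm_do_idle() lean on the same ordering rule: clear TS_POLLING, issue a full memory barrier, then test need_resched(). Without the barrier, a remote CPU setting TIF_NEED_RESCHED could still observe TS_POLLING and skip the wakeup IPI after this CPU had already tested the flag and committed to halting. A condensed sketch of the idiom (hypothetical function name):

/* Condensed sketch of the TS_POLLING handshake used by these idle
 * routines.  The clear of TS_POLLING must be globally visible before
 * need_resched() is tested; otherwise the resched path may elide the
 * IPI and leave this CPU halted with work pending. */
static void idle_halt_idiom(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb();		/* order the clear against the flag test */
	if (!need_resched())
		safe_halt();	/* sti; hlt - the IPI or timer wakes us */
	else
		local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
}
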
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..61fadbeaea94 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
+#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -109,7 +110,8 @@ void cpu_idle(void)
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
start_critical_timings();
}
tick_nohz_restart_sched_tick();
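
The contract this hunk (and the identical one in process_64.c below) assumes: cpuidle_idle_call() returns 0 when a registered cpuidle driver handled the idle entry, and non-zero when no driver is available, in which case the architecture falls back to its legacy pm_idle() pointer. A sketch of the resulting inner loop:

	/* Sketch: cpuidle gets first refusal; a non-zero return means no
	 * cpuidle driver is registered, so the legacy pm_idle() pointer
	 * (default_idle, amd_e400_idle, poll_idle, ...) runs instead. */
	while (!need_resched()) {
		local_irq_disable();
		stop_critical_timings();
		if (cpuidle_idle_call())
			pm_idle();
		start_critical_timings();
	}
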
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..62c219a6c978 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
+#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -136,7 +137,8 @@ void cpu_idle(void)
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
start_critical_timings();
/* In many cases the interrupt that ended idle
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5a0484a95ad6..f2ffaf45103b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -176,9 +176,9 @@ static struct resource bss_resource = {
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 0, 0, -1};
/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 0, 0, -1};
EXPORT_SYMBOL(boot_cpu_data);
static void set_mca_bus(int x)
{
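
The two one-character edits above are positional-initializer fallout: the sixth member of the 32-bit head of struct cpuinfo_x86 was hlt_works_ok, initialized to 1, and with the member deleted from <asm/processor.h> by this series its initializer disappears and the later values shift left. Roughly, with field names assumed from that era's header (a sketch, not the full struct):

/* Sketch of the leading CONFIG_X86_32 members; the brace initializers
 * above map onto these fields in declaration order. */
struct cpuinfo_x86 {
	__u8 x86;		/* CPU family */
	__u8 x86_vendor;
	__u8 x86_model;
	__u8 x86_mask;
	char wp_works_ok;	/* -1: unknown until tested */
	/* char hlt_works_ok;	   removed by this series */
	char hard_math;
	/* ... remaining members unchanged ... */
};
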
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c2871d3c71b6..89c6ae9aaa45 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1307,7 +1307,7 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- c1e_remove_cpu(raw_smp_processor_id());
+ amd_e400_remove_cpu(raw_smp_processor_id());
mb();
/* Ack it */