Diffstat (limited to 'arch/sh/kernel/idle.c')
-rw-r--r--  arch/sh/kernel/idle.c  101
 1 file changed, 11 insertions(+), 90 deletions(-)
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
static void (*sh_idle)(void);
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
- return !hlt_counter;
-}
-
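
The removed "hlt"/"nohlt" boot parameters are not lost in this conversion: the generic idle loop provides the same switch through its cpu_idle_force_poll flag. A sketch of the generic replacement, approximately as it appeared in kernel/cpu/idle.c in this era (simplified from memory; not part of this diff):

        static int __read_mostly cpu_idle_force_poll;

        /* "nohlt": always poll need_resched() instead of sleeping */
        static int __init cpu_idle_poll_setup(char *__unused)
        {
                cpu_idle_force_poll = 1;
                return 1;
        }
        __setup("nohlt", cpu_idle_poll_setup);

        /* "hlt": allow the architecture's sleep-based idle routine */
        static int __init cpu_idle_nopoll_setup(char *__unused)
        {
                cpu_idle_force_poll = 0;
                return 1;
        }
        __setup("hlt", cpu_idle_nopoll_setup);
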
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
{
+ set_bl_bit();
local_irq_enable();
- while (!need_resched())
- cpu_relax();
+ /* Isn't this racy ? */
+ cpu_sleep();
+ clear_bl_bit();
}
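
The "Isn't this racy ?" comment, carried over from the old default_idle(), questions the window between local_irq_enable() and cpu_sleep(): if the wakeup interrupt is delivered in that window rather than during the sleep instruction, the CPU can still go to sleep until the next interrupt arrives; whether the SR.BL manipulation around the sequence closes that window is exactly what the comment asks. For readability, the function as it stands after the patch (reconstructed from the hunk above; the trailing comments are annotations, not from the source):

        void default_idle(void)
        {
                set_bl_bit();           /* set SR.BL around the sleep sequence */
                local_irq_enable();     /* a wakeup IRQ may already land here ... */
                /* Isn't this racy ? */
                cpu_sleep();            /* ... yet the core sleeps until the next one */
                clear_bl_bit();
        }
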
-void default_idle(void)
+void arch_cpu_idle_dead(void)
{
- if (hlt_works()) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
-
- set_bl_bit();
- if (!need_resched()) {
- local_irq_enable();
- cpu_sleep();
- } else
- local_irq_enable();
-
- set_thread_flag(TIF_POLLING_NRFLAG);
- clear_bl_bit();
- } else
- poll_idle();
+ play_dead();
}
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- unsigned int cpu = smp_processor_id();
-
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched()) {
- check_pgt_cache();
- rmb();
-
- if (cpu_is_offline(cpu))
- play_dead();
-
- local_irq_disable();
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- if (cpuidle_idle_call())
- sh_idle();
- /*
- * Sanity check to ensure that sh_idle() returns
- * with IRQs enabled
- */
- WARN_ON(irqs_disabled());
- start_critical_timings();
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ if (cpuidle_idle_call())
+ sh_idle();
}
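
Everything removed above survives in generic form: the outer loop now lives in kernel/cpu/idle.c and calls back into the two arch hooks defined here, and cpuidle_idle_call() returns nonzero when no cpuidle driver handled the request, leaving sh_idle() as the fallback. A simplified sketch of the generic loop of this era (polling, tracing and RCU details trimmed; not part of this diff):

        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {
                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();   /* replaces the open-coded play_dead() */
                        local_irq_disable();
                        arch_cpu_idle();                /* must return with IRQs enabled */
                }
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
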
void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
/*
* If a platform has set its own idle routine, leave it alone.
*/
- if (sh_idle)
- return;
-
- if (hlt_works())
+ if (!sh_idle)
sh_idle = default_idle;
- else
- sh_idle = poll_idle;
}
void stop_this_cpu(void *unused)
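
The hunk context is cut off here; stop_this_cpu() itself is untouched by this patch. For reference, a reconstruction of that function from the sh tree of the same era (from memory; not part of this diff):

        void stop_this_cpu(void *unused)
        {
                local_irq_disable();
                set_cpu_online(smp_processor_id(), false);

                for (;;)
                        cpu_sleep();
        }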