Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r-- | drivers/acpi/processor_idle.c | 109
1 file changed, 21 insertions, 88 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e8086c725305..fc95308e9a11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -28,19 +28,12 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>	/* need_resched() */
-#include <linux/pm_qos.h>
+#include <linux/sched.h>	/* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
-#include <linux/irqflags.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -52,22 +45,14 @@
 #include <asm/apic.h>
 #endif
 
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
 #include <acpi/acpi_bus.h>
 #include <acpi/processor.h>
-#include <asm/processor.h>
 
 #define PREFIX "ACPI: "
 
 #define ACPI_PROCESSOR_CLASS	"processor"
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
-#define PM_TIMER_TICK_NS	(1000000000ULL/PM_TIMER_FREQUENCY)
-#define C2_OVERHEAD		1	/* 1us */
-#define C3_OVERHEAD		1	/* 1us */
-#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
@@ -81,10 +66,11 @@ module_param(latency_factor, uint, 0644);
 
 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
 
+static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+
 static int disabled_by_idle_boot_param(void)
 {
 	return boot_option_idle_override == IDLE_POLL ||
-		boot_option_idle_override == IDLE_FORCE_MWAIT ||
 		boot_option_idle_override == IDLE_HALT;
 }
 
@@ -735,31 +721,17 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
-	ktime_t  kt1, kt2;
-	s64 idle_time;
 	struct acpi_processor *pr;
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+	struct acpi_processor_cx *cx = acpi_cstate[index];
 
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	local_irq_disable();
-
 	lapic_timer_state_broadcast(pr, cx, 1);
-	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
-	kt2 = ktime_get_real();
-	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
-
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
-
-	local_irq_enable();
 	lapic_timer_state_broadcast(pr, cx, 0);
 
 	return index;
@@ -773,8 +745,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
  */
 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+	struct acpi_processor_cx *cx = acpi_cstate[index];
 
 	ACPI_FLUSH_CPU_CACHE();
 
@@ -804,21 +775,13 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
-	ktime_t  kt1, kt2;
-	s64 idle_time_ns;
-	s64 idle_time;
+	struct acpi_processor_cx *cx = acpi_cstate[index];
 
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	local_irq_disable();
-
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -829,7 +792,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
-			local_irq_enable();
 			return -EINVAL;
 		}
 	}
@@ -843,22 +805,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
-	kt1 = ktime_get_real();
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
 	acpi_idle_do_entry(cx);
-	kt2 = ktime_get_real();
-	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-	idle_time = idle_time_ns;
-	do_div(idle_time, NSEC_PER_USEC);
-
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
-
 	/* Tell the scheduler how much we idled: */
-	sched_clock_idle_wakeup_event(idle_time_ns);
+	sched_clock_idle_wakeup_event(0);
 
-	local_irq_enable();
 	if (cx->entry_method != ACPI_CSTATE_FFH)
 		current_thread_info()->status |= TS_POLLING;
@@ -881,15 +833,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
-	ktime_t  kt1, kt2;
-	s64 idle_time_ns;
-	s64 idle_time;
-
+	struct acpi_processor_cx *cx = acpi_cstate[index];
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
@@ -899,16 +845,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 			return drv->states[drv->safe_state_index].enter(dev,
 						drv, drv->safe_state_index);
 		} else {
-			local_irq_disable();
 			acpi_safe_halt();
-			local_irq_enable();
 			return -EBUSY;
 		}
 	}
 
-	local_irq_disable();
-
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -919,7 +860,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
-			local_irq_enable();
 			return -EINVAL;
 		}
 	}
@@ -934,7 +874,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 */
 	lapic_timer_state_broadcast(pr, cx, 1);
 
-	kt1 = ktime_get_real();
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
@@ -965,18 +904,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		c3_cpu_count--;
 		raw_spin_unlock(&c3_lock);
 	}
-	kt2 = ktime_get_real();
-	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-	idle_time = idle_time_ns;
-	do_div(idle_time, NSEC_PER_USEC);
-
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
-
 	/* Tell the scheduler how much we idled: */
-	sched_clock_idle_wakeup_event(idle_time_ns);
+	sched_clock_idle_wakeup_event(0);
 
-	local_irq_enable();
 	if (cx->entry_method != ACPI_CSTATE_FFH)
 		current_thread_info()->status |= TS_POLLING;
@@ -987,6 +917,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 struct cpuidle_driver acpi_idle_driver = {
 	.name =		"acpi_idle",
 	.owner =	THIS_MODULE,
+	.en_core_tk_irqen = 1,
 };
 
 /**
@@ -994,13 +925,13 @@ struct cpuidle_driver acpi_idle_driver = {
  * device i.e. per-cpu data
  *
  * @pr: the ACPI processor
+ * @dev : the cpuidle device
  */
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+					   struct cpuidle_device *dev)
 {
 	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state_usage *state_usage;
-	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
 	if (!pr->flags.power_setup_done)
 		return -EINVAL;
@@ -1009,6 +940,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 		return -EINVAL;
 	}
 
+	if (!dev)
+		return -EINVAL;
+
 	dev->cpu = pr->id;
 
 	if (max_cstate == 0)
@@ -1016,7 +950,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
-		state_usage = &dev->states_usage[count];
 
 		if (!cx->valid)
 			continue;
@@ -1027,8 +960,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 			continue;
 #endif
-
-		cpuidle_set_statedata(state_usage, cx);
+		acpi_cstate[count] = cx;
 		count++;
 
 		if (count == CPUIDLE_STATE_MAX)
@@ -1152,7 +1084,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
 	cpuidle_disable_device(dev);
 	acpi_processor_get_power_info(pr);
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle_cx(pr);
+		acpi_processor_setup_cpuidle_cx(pr, dev);
 		ret = cpuidle_enable_device(dev);
 	}
 	cpuidle_resume_and_unlock();
@@ -1200,6 +1132,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	}
 
 	/* Populate Updated C-state information */
+	acpi_processor_get_power_info(pr);
 	acpi_processor_setup_cpuidle_states(pr);
 
 	/* Enable all cpuidle devices */
@@ -1209,8 +1142,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 			continue;
 		acpi_processor_get_power_info(_pr);
 		if (_pr->flags.power) {
-			acpi_processor_setup_cpuidle_cx(_pr);
 			dev = per_cpu(acpi_cpuidle_device, cpu);
+			acpi_processor_setup_cpuidle_cx(_pr, dev);
 			cpuidle_enable_device(dev);
 		}
 	}
@@ -1279,7 +1212,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
 			return -ENOMEM;
 		per_cpu(acpi_cpuidle_device, pr->id) = dev;
 
-		acpi_processor_setup_cpuidle_cx(pr);
+		acpi_processor_setup_cpuidle_cx(pr, dev);
 
 		/* Register per-cpu cpuidle_device. Cpuidle driver
 		 * must already be registered before registering device
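
In short, the diff does three things: it moves residency time keeping and local IRQ handling out of the driver and into the cpuidle core (the new .en_core_tk_irqen flag, together with the removal of the ktime/last_residency bookkeeping and the local_irq_disable()/local_irq_enable() calls in the ->enter callbacks), it replaces the per-device cpuidle_set_statedata()/cpuidle_get_statedata() storage with one driver-global acpi_cstate[] table indexed by the cpuidle state index, and it passes the cpuidle device into acpi_processor_setup_cpuidle_cx() explicitly. The sketch below is a plain user-space C illustration of the table-lookup pattern only; every name in it (cstate_info, cstate_table, enter_state, STATE_MAX) is hypothetical and is not a kernel API.

/*
 * Illustration only -- not kernel code.  It mimics the lookup pattern the
 * diff introduces: instead of stashing a pointer in each cpuidle device's
 * per-state usage area, the driver keeps one global table indexed by the
 * cpuidle state index, like acpi_cstate[] above.
 */
#include <stdio.h>

#define STATE_MAX 8	/* stand-in for CPUIDLE_STATE_MAX */

struct cstate_info {
	const char *name;
	int type;	/* stand-in for ACPI_STATE_C1/C2/C3 */
};

/* one table shared by every CPU, filled once at setup time */
static struct cstate_info *cstate_table[STATE_MAX];

static struct cstate_info c1 = { "C1", 1 };
static struct cstate_info c2 = { "C2", 2 };

/* the "enter" path: a direct array lookup, no per-device state data */
static int enter_state(int index)
{
	struct cstate_info *cx = cstate_table[index];

	if (!cx)
		return -1;
	printf("entering %s (ACPI C%d) at cpuidle index %d\n",
	       cx->name, cx->type, index);
	return index;
}

int main(void)
{
	/* setup phase, analogous to acpi_processor_setup_cpuidle_cx()
	 * filling acpi_cstate[] */
	cstate_table[1] = &c1;
	cstate_table[2] = &c2;

	enter_state(1);
	enter_state(2);
	return 0;
}

The table is shared by all CPUs, which (as far as this diff shows) assumes the C-state layout handed to cpuidle is the same on every processor, so the per-device copy that cpuidle_get_statedata() used to return was redundant.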