diff options
author | Arve Hjønnevåg <arve@android.com> | 2010-05-24 20:32:58 +0200 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2010-05-24 20:32:58 +0200 |
commit | e7842b938b29d3476a141f0d555d7d526bcb8fd7 (patch) | |
tree | 1dfec55ac607df344a4dcd6b04b5ad2e788c2b50 /kernel | |
parent | f4b87dee923342505e1ddba8d34ce9de33e75050 (diff) |
PM: Opportunistic suspend support
Power management features present in the current mainline kernel are
insufficient to get maximum possible energy savings on some platforms,
such as Android. The problem is that to save maximum amount of energy
all system hardware components need to be in the lowest-power states
available for as long as reasonably possible, but at the same time the
system must always respond to certain events, regardless of the
current state of the hardware.
The first goal can be achieved either by using device runtime PM and
cpuidle to put all hardware into low-power states, transparently from
the user space point of view, or by suspending the whole system.
However, system suspend, in its current form, does not guarantee that
the events of interest will always be responded to, since wakeup
events (events that wake the CPU from idle and the system from
suspend) that occur right after initiating suspend will not be
processed until another possibly unrelated event wakes the system up
again.
On hardware where idle can enter the same power state as suspend, idle
combined with runtime PM can be used, but periodic wakeups increase
the average power consumption. Suspending the system also reduces the
harm caused by apps that never go idle. There also are systems where
some devices cannot be put into low-power states without suspending
the entire system (or the low-power states available to them without
suspending the entire system are substantially shallower than the
low-power states they are put into when the entire system is
suspended), so the system has to be suspended as a whole to achieve
the maximum energy savings.
To allow Android and similar platforms to save more energy than they
currently can save using the mainline kernel, introduce a mechanism by
which the system is automatically suspended (i.e. put into a
system-wide sleep state) whenever it's not doing work that's
immediately useful to the user, called opportunistic suspend.
For this purpose introduce the suspend blockers framework allowing the
kernel's power management subsystem to decide when it is desirable to
suspend the system (i.e. when the system is not doing anything the
user really cares about at the moment and therefore it may be
suspended). Add an API that drivers can use to block
opportunistic suspend. This is needed to avoid losing wakeup events
that occur right after suspend is initiated.
Add /sys/power/policy that selects the behavior of /sys/power/state.
After setting the policy to opportunistic, writes to /sys/power/state
become non-blocking requests that specify which suspend state to enter
when no suspend blockers are active. A special state, "on", stops the
process by activating the "main" suspend blocker.
Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/power/Kconfig | 16 | ||||
-rw-r--r-- | kernel/power/Makefile | 1 | ||||
-rw-r--r-- | kernel/power/main.c | 130 | ||||
-rw-r--r-- | kernel/power/opportunistic_suspend.c | 298 | ||||
-rw-r--r-- | kernel/power/power.h | 9 | ||||
-rw-r--r-- | kernel/power/suspend.c | 3 |
6 files changed, 449 insertions, 8 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 5c36ea9d55d2..6d11a4521d1e 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -130,6 +130,22 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. +config OPPORTUNISTIC_SUSPEND + bool "Opportunistic suspend" + depends on SUSPEND + select RTC_LIB + default n + ---help--- + Opportunistic sleep support. Allows the system to be put into a sleep + state opportunistically, if it doesn't do any useful work at the + moment. The PM subsystem is switched into this mode of operation by + writing "opportunistic" into /sys/power/policy, while writing + "forced" to this file turns the opportunistic suspend feature off. + In the "opportunistic" mode suspend blockers are used to determine + when to suspend the system and the value written to /sys/power/state + determines the sleep state the system will be put into when there are + no active suspend blockers. + config HIBERNATION_NVS bool diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 524e058dcf06..9b01cb019319 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_PM) += main.o obj-$(CONFIG_PM_SLEEP) += console.o obj-$(CONFIG_FREEZER) += process.o obj-$(CONFIG_SUSPEND) += suspend.o +obj-$(CONFIG_OPPORTUNISTIC_SUSPEND) += opportunistic_suspend.o obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b21fc0..51d6c982a855 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -20,6 +20,58 @@ DEFINE_MUTEX(pm_mutex); unsigned int pm_flags; EXPORT_SYMBOL(pm_flags); +#ifdef CONFIG_OPPORTUNISTIC_SUSPEND +struct pm_policy { + const char *name; + bool (*valid_state)(suspend_state_t state); + int (*set_state)(suspend_state_t state); +}; + +static struct pm_policy policies[] = { + { + .name = "forced", + .valid_state = valid_state, + 
.set_state = enter_state, + }, + { + .name = "opportunistic", + .valid_state = opportunistic_suspend_valid_state, + .set_state = opportunistic_suspend_state, + }, +}; + +static int policy; + +static inline bool hibernation_supported(void) +{ + return !strncmp(policies[policy].name, "forced", 6); +} + +static inline bool pm_state_valid(int state_idx) +{ + return pm_states[state_idx] && policies[policy].valid_state(state_idx); +} + +static inline int pm_enter_state(int state_idx) +{ + return policies[policy].set_state(state_idx); +} + +#else + +static inline bool hibernation_supported(void) { return true; } + +static inline bool pm_state_valid(int state_idx) +{ + return pm_states[state_idx] && valid_state(state_idx); +} + +static inline int pm_enter_state(int state_idx) +{ + return enter_state(state_idx); +} +#endif /* CONFIG_OPPORTUNISTIC_SUSPEND */ + #ifdef CONFIG_PM_SLEEP /* Routines for PM-transition notifications */ @@ -146,6 +198,12 @@ struct kobject *power_kobj; * * store() accepts one of those strings, translates it into the * proper enumerated value, and initiates a suspend transition. + * + * If policy is set to opportunistic, store() does not block until the + * system resumes, and it will try to re-enter the state until another + * state is requested. Suspend blockers are respected and the requested + * state will only be entered when no suspend blockers are active. + * Write "on" to disable. 
*/ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -155,12 +213,15 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, int i; for (i = 0; i < PM_SUSPEND_MAX; i++) { - if (pm_states[i] && valid_state(i)) + if (pm_state_valid(i)) s += sprintf(s,"%s ", pm_states[i]); } #endif #ifdef CONFIG_HIBERNATION - s += sprintf(s, "%s\n", "disk"); + if (hibernation_supported()) + s += sprintf(s, "%s\n", "disk"); + else + s += sprintf(s, "\n"); #else if (s != buf) /* convert the last space to a newline */ @@ -173,7 +234,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { #ifdef CONFIG_SUSPEND - suspend_state_t state = PM_SUSPEND_STANDBY; + suspend_state_t state = PM_SUSPEND_ON; const char * const *s; #endif char *p; @@ -185,8 +246,9 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, /* First, check if we are requested to hibernate */ if (len == 4 && !strncmp(buf, "disk", len)) { - error = hibernate(); - goto Exit; + if (hibernation_supported()) + error = hibernate(); + goto Exit; } #ifdef CONFIG_SUSPEND @@ -195,7 +257,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, break; } if (state < PM_SUSPEND_MAX && *s) - error = enter_state(state); + error = pm_enter_state(state); #endif Exit: @@ -204,6 +266,56 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, power_attr(state); +#ifdef CONFIG_OPPORTUNISTIC_SUSPEND +/** + * policy - set policy for state + */ +static ssize_t policy_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int i; + + for (i = 0; i < ARRAY_SIZE(policies); i++) { + if (i == policy) + s += sprintf(s, "[%s] ", policies[i].name); + else + s += sprintf(s, "%s ", policies[i].name); + } + if (s != buf) + /* convert the last space to a newline */ + *(s-1) = '\n'; + return (s - buf); +} + +static ssize_t 
policy_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + const char *s; + char *p; + int len; + int i; + + p = memchr(buf, '\n', n); + len = p ? p - buf : n; + + for (i = 0; i < ARRAY_SIZE(policies); i++) { + s = policies[i].name; + if (s && len == strlen(s) && !strncmp(buf, s, len)) { + mutex_lock(&pm_mutex); + policies[policy].set_state(PM_SUSPEND_ON); + policy = i; + mutex_unlock(&pm_mutex); + return n; + } + } + return -EINVAL; +} + +power_attr(policy); +#endif /* CONFIG_OPPORTUNISTIC_SUSPEND */ + #ifdef CONFIG_PM_TRACE int pm_trace_enabled; @@ -236,6 +348,9 @@ static struct attribute * g[] = { #endif #ifdef CONFIG_PM_SLEEP &pm_async_attr.attr, +#ifdef CONFIG_OPPORTUNISTIC_SUSPEND + &policy_attr.attr, +#endif #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif @@ -247,7 +362,7 @@ static struct attribute_group attr_group = { .attrs = g, }; -#ifdef CONFIG_PM_RUNTIME +#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_OPPORTUNISTIC_SUSPEND) struct workqueue_struct *pm_wq; EXPORT_SYMBOL_GPL(pm_wq); @@ -266,6 +381,7 @@ static int __init pm_init(void) int error = pm_start_workqueue(); if (error) return error; + opportunistic_suspend_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/kernel/power/opportunistic_suspend.c b/kernel/power/opportunistic_suspend.c new file mode 100644 index 000000000000..cc90b6048a44 --- /dev/null +++ b/kernel/power/opportunistic_suspend.c @@ -0,0 +1,298 @@ +/* + * kernel/power/opportunistic_suspend.c + * + * Copyright (C) 2005-2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/suspend.h> + +#include "power.h" + +extern struct workqueue_struct *pm_wq; + +enum { + DEBUG_EXIT_SUSPEND = 1U << 0, + DEBUG_WAKEUP = 1U << 1, + DEBUG_USER_STATE = 1U << 2, + DEBUG_SUSPEND = 1U << 3, + DEBUG_SUSPEND_BLOCKER = 1U << 4, +}; +static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP | DEBUG_USER_STATE; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static int unknown_wakeup_delay_msecs = 500; +module_param_named(unknown_wakeup_delay_msecs, unknown_wakeup_delay_msecs, int, + S_IRUGO | S_IWUSR | S_IWGRP); + +#define SB_INITIALIZED (1U << 8) +#define SB_ACTIVE (1U << 9) + +DEFINE_SUSPEND_BLOCKER(main_suspend_blocker, main); + +static DEFINE_SPINLOCK(list_lock); +static DEFINE_SPINLOCK(state_lock); +static LIST_HEAD(inactive_blockers); +static LIST_HEAD(active_blockers); +static int current_event_num; +static suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; +static bool enable_suspend_blockers; +static DEFINE_SUSPEND_BLOCKER(unknown_wakeup, unknown_wakeups); + +#define pr_info_time(fmt, args...) \ + do { \ + struct timespec ts; \ + struct rtc_time tm; \ + getnstimeofday(&ts); \ + rtc_time_to_tm(ts.tv_sec, &tm); \ + pr_info(fmt "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n" , \ + args, \ + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ + } while (0); + +static void print_active_suspend_blockers(void) +{ + struct suspend_blocker *blocker; + + list_for_each_entry(blocker, &active_blockers, link) + pr_info("PM: Active suspend blocker %s\n", blocker->name); +} + +/** + * suspend_is_blocked - Check if there are active suspend blockers. + * + * Return true if suspend blockers are enabled and there are active suspend + * blockers, in which case the system cannot be put to sleep opportunistically. 
+ */ +bool suspend_is_blocked(void) +{ + return enable_suspend_blockers && !list_empty(&active_blockers); +} + +static void expire_unknown_wakeup(unsigned long data) +{ + suspend_unblock(&unknown_wakeup); +} +static DEFINE_TIMER(expire_unknown_wakeup_timer, expire_unknown_wakeup, 0, 0); + +static void suspend_worker(struct work_struct *work) +{ + int ret; + int entry_event_num; + + enable_suspend_blockers = true; + + if (suspend_is_blocked()) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("PM: Automatic suspend aborted\n"); + goto abort; + } + + entry_event_num = current_event_num; + + if (debug_mask & DEBUG_SUSPEND) + pr_info("PM: Automatic suspend\n"); + + ret = pm_suspend(requested_suspend_state); + + if (debug_mask & DEBUG_EXIT_SUSPEND) + pr_info_time("PM: Automatic suspend exit, ret = %d ", ret); + + if (current_event_num == entry_event_num) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("PM: pm_suspend() returned with no event\n"); + suspend_block(&unknown_wakeup); + mod_timer(&expire_unknown_wakeup_timer, + msecs_to_jiffies(unknown_wakeup_delay_msecs)); + } + +abort: + enable_suspend_blockers = false; +} +static DECLARE_WORK(suspend_work, suspend_worker); + +/** + * suspend_blocker_register - Prepare a suspend blocker for being used. + * @blocker: Suspend blocker to handle. + * + * The suspend blocker struct and name must not be freed before calling + * suspend_blocker_unregister(). + */ +void suspend_blocker_register(struct suspend_blocker *blocker) +{ + unsigned long irqflags = 0; + + WARN_ON(!blocker->name); + + if (debug_mask & DEBUG_SUSPEND_BLOCKER) + pr_info("%s: Registering %s\n", __func__, blocker->name); + + blocker->flags = SB_INITIALIZED; + INIT_LIST_HEAD(&blocker->link); + + spin_lock_irqsave(&list_lock, irqflags); + list_add(&blocker->link, &inactive_blockers); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(suspend_blocker_register); + +/** + * suspend_blocker_init - Initialize a suspend blocker's name and register it. 
+ * @blocker: Suspend blocker to initialize. + * @name: The name of the suspend blocker to show in debug messages. + * + * The suspend blocker struct and name must not be freed before calling + * suspend_blocker_unregister(). + */ +void suspend_blocker_init(struct suspend_blocker *blocker, const char *name) +{ + blocker->name = name; + suspend_blocker_register(blocker); +} +EXPORT_SYMBOL(suspend_blocker_init); + +/** + * suspend_blocker_unregister - Unregister a suspend blocker. + * @blocker: Suspend blocker to handle. + */ +void suspend_blocker_unregister(struct suspend_blocker *blocker) +{ + unsigned long irqflags; + + if (WARN_ON(!(blocker->flags & SB_INITIALIZED))) + return; + + spin_lock_irqsave(&list_lock, irqflags); + blocker->flags &= ~SB_INITIALIZED; + list_del(&blocker->link); + if ((blocker->flags & SB_ACTIVE) && list_empty(&active_blockers)) + queue_work(pm_wq, &suspend_work); + spin_unlock_irqrestore(&list_lock, irqflags); + + if (debug_mask & DEBUG_SUSPEND_BLOCKER) + pr_info("%s: Unregistered %s\n", __func__, blocker->name); +} +EXPORT_SYMBOL(suspend_blocker_unregister); + +/** + * suspend_block - Block system suspend. + * @blocker: Suspend blocker to use. + * + * It is safe to call this function from interrupt context. + */ +void suspend_block(struct suspend_blocker *blocker) +{ + unsigned long irqflags; + + if (WARN_ON(!(blocker->flags & SB_INITIALIZED))) + return; + + spin_lock_irqsave(&list_lock, irqflags); + + if (debug_mask & DEBUG_SUSPEND_BLOCKER) + pr_info("%s: %s\n", __func__, blocker->name); + + blocker->flags |= SB_ACTIVE; + list_move(&blocker->link, &active_blockers); + + current_event_num++; + + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(suspend_block); + +/** + * suspend_unblock - Allow system suspend to happen. + * @blocker: Suspend blocker to unblock. + * + * If no other suspend blockers are active, schedule suspend of the system. + * + * It is safe to call this function from interrupt context. 
+ */ +void suspend_unblock(struct suspend_blocker *blocker) +{ + unsigned long irqflags; + + if (WARN_ON(!(blocker->flags & SB_INITIALIZED))) + return; + + spin_lock_irqsave(&list_lock, irqflags); + + if (debug_mask & DEBUG_SUSPEND_BLOCKER) + pr_info("%s: %s\n", __func__, blocker->name); + + list_move(&blocker->link, &inactive_blockers); + if ((blocker->flags & SB_ACTIVE) && list_empty(&active_blockers)) + queue_work(pm_wq, &suspend_work); + blocker->flags &= ~(SB_ACTIVE); + + if ((debug_mask & DEBUG_SUSPEND) && blocker == &main_suspend_blocker) + print_active_suspend_blockers(); + + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(suspend_unblock); + +/** + * suspend_blocker_is_active - Test if a suspend blocker is blocking suspend + * @blocker: Suspend blocker to check. + * + * Returns true if the suspend_blocker is currently active. + */ +bool suspend_blocker_is_active(struct suspend_blocker *blocker) +{ + WARN_ON(!(blocker->flags & SB_INITIALIZED)); + + return !!(blocker->flags & SB_ACTIVE); +} +EXPORT_SYMBOL(suspend_blocker_is_active); + +bool opportunistic_suspend_valid_state(suspend_state_t state) +{ + return (state == PM_SUSPEND_ON) || valid_state(state); +} + +int opportunistic_suspend_state(suspend_state_t state) +{ + unsigned long irqflags; + + if (!opportunistic_suspend_valid_state(state)) + return -ENODEV; + + spin_lock_irqsave(&state_lock, irqflags); + + if (debug_mask & DEBUG_USER_STATE) + pr_info_time("%s: %s (%d->%d) at %lld ", __func__, + state != PM_SUSPEND_ON ? 
"sleep" : "wakeup", + requested_suspend_state, state, + ktime_to_ns(ktime_get())); + + requested_suspend_state = state; + if (state == PM_SUSPEND_ON) + suspend_block(&main_suspend_blocker); + else + suspend_unblock(&main_suspend_blocker); + + spin_unlock_irqrestore(&state_lock, irqflags); + + return 0; +} + +void __init opportunistic_suspend_init(void) +{ + suspend_blocker_register(&main_suspend_blocker); + suspend_block(&main_suspend_blocker); + suspend_blocker_register(&unknown_wakeup); +} diff --git a/kernel/power/power.h b/kernel/power/power.h index 006270fe382d..d8c4e0d495c7 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -233,3 +233,12 @@ static inline void suspend_thaw_processes(void) { } #endif + +#ifdef CONFIG_OPPORTUNISTIC_SUSPEND +/* kernel/power/opportunistic_suspend.c */ +extern int opportunistic_suspend_state(suspend_state_t state); +extern bool opportunistic_suspend_valid_state(suspend_state_t state); +extern void __init opportunistic_suspend_init(void); +#else +static inline void opportunistic_suspend_init(void) {} +#endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 56e7dbb8b996..9eb3876f21c4 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -20,6 +20,7 @@ #include "power.h" const char *const pm_states[PM_SUSPEND_MAX] = { + [PM_SUSPEND_ON] = "on", [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] = "mem", }; @@ -157,7 +158,7 @@ static int suspend_enter(suspend_state_t state) error = sysdev_suspend(PMSG_SUSPEND); if (!error) { - if (!suspend_test(TEST_CORE)) + if (!suspend_is_blocked() && !suspend_test(TEST_CORE)) error = suspend_ops->enter(state); sysdev_resume(); } |