Diffstat (limited to 'drivers/base')
 drivers/base/power/main.c  | 242
 drivers/base/power/power.h |   6
 drivers/base/power/sysfs.c |  47
 3 files changed, 266 insertions(+), 29 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1a216c114a0f..2869a4865369 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,6 +25,7 @@
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/async.h>
#include "../base.h"
#include "power.h"
@@ -42,6 +43,7 @@
LIST_HEAD(dpm_list);
static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
/*
* Set once the preparation of devices for a PM transition has started, reset
@@ -56,6 +58,7 @@ static bool transition_started;
void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
+ init_completion(&dev->power.completion);
pm_runtime_init(dev);
}
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev)
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
+ complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
@@ -161,6 +165,57 @@ void device_pm_move_last(struct device *dev)
list_move_tail(&dev->power.entry, &dpm_list);
}
+static ktime_t initcall_debug_start(struct device *dev)
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ if (initcall_debug) {
+ pr_info("calling %s+ @ %i\n",
+ dev_name(dev), task_pid_nr(current));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+ int error)
+{
+ ktime_t delta, rettime;
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+ error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ }
+}
+
+/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
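The helpers above carry the new ordering scheme: every device has a struct completion, __device_resume() waits on its parent's completion before touching the device, __device_suspend() waits on all of its children through dpm_wait_for_children(), and complete_all() releases every waiter once the device is done. The following is a stand-alone user-space sketch of that handshake built on pthreads; the completion type and helper names only mimic the kernel API and are not the kernel implementation.

    #include <pthread.h>
    #include <stdio.h>

    /* Minimal user-space stand-in for struct completion (illustrative only). */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
    }

    static void complete_all(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);   /* wake every waiter */
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static struct completion parent_done;

    /* A child resuming asynchronously must wait for its parent, just as
     * __device_resume() calls dpm_wait(dev->parent, async). */
    static void *child_resume(void *unused)
    {
        wait_for_completion(&parent_done);
        printf("child resumed after parent\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t child;

        init_completion(&parent_done);
        pthread_create(&child, NULL, child_resume, NULL);
        printf("parent resuming\n");
        complete_all(&parent_done);   /* like complete_all(&dev->power.completion) */
        pthread_join(child, NULL);
        return 0;
    }

Built with -lpthread, the child always prints after the parent, which is exactly the guarantee dpm_wait() gives an asynchronously resumed device.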
/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
@@ -172,13 +227,9 @@ static int pm_op(struct device *dev,
pm_message_t state)
{
int error = 0;
- ktime_t calltime, delta, rettime;
+ ktime_t calltime;
- if (initcall_debug) {
- pr_info("calling %s+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
- calltime = ktime_get();
- }
+ calltime = initcall_debug_start(dev);
switch (state.event) {
#ifdef CONFIG_SUSPEND
@@ -227,12 +278,7 @@ static int pm_op(struct device *dev,
error = -EINVAL;
}
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
- error, (unsigned long long)ktime_to_ns(delta) >> 10);
- }
+ initcall_debug_report(dev, calltime, error);
return error;
}
@@ -309,8 +355,9 @@ static int pm_noirq_op(struct device *dev,
if (initcall_debug) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev),
- error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ printk("initcall %s_i+ returned %d after %Ld usecs\n",
+ dev_name(dev), error,
+ (unsigned long long)ktime_to_ns(delta) >> 10);
}
return error;
@@ -354,6 +401,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ s64 usecs64;
+ int usecs;
+
+ calltime = ktime_get();
+ usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs = usecs64;
+ if (usecs == 0)
+ usecs = 1;
+ pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
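dpm_show_time() above times a whole suspend or resume phase: it takes the phase start from ktime_get(), converts the elapsed nanoseconds to microseconds with do_div(), rounds a zero result up to one so a phase never reports 0.000, and prints milliseconds with three decimals. The same arithmetic in a stand-alone C program; the constants and names here are local stand-ins, not the kernel definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-ins for the kernel constants used by dpm_show_time(). */
    #define NSEC_PER_USEC 1000ULL
    #define USEC_PER_MSEC 1000

    /* Same arithmetic as dpm_show_time(), applied to an elapsed time in ns. */
    static void show_time(uint64_t elapsed_ns, const char *info, const char *verb)
    {
        uint64_t usecs64 = elapsed_ns / NSEC_PER_USEC;  /* do_div() in the kernel */
        int usecs = (int)usecs64;

        if (usecs == 0)
            usecs = 1;  /* never report "0.000 msecs" for a phase that ran */
        printf("PM: %s%s%s of devices complete after %d.%03d msecs\n",
               info ? info : "", info ? " " : "", verb,
               usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
    }

    int main(void)
    {
        show_time(1234567890ULL, "early", "resume");  /* -> 1234.567 msecs */
        show_time(250000ULL, NULL, "suspend");        /* -> 0.250 msecs */
        return 0;
    }

For 1,234,567,890 ns this prints "1234.567 msecs"; anything that finishes in under a microsecond still shows up as 0.001.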
/*------------------------- Resume routines -------------------------*/
/**
@@ -390,6 +454,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
transition_started = false;
@@ -403,22 +468,45 @@ void dpm_resume_noirq(pm_message_t state)
pm_dev_err(dev, state, " early", error);
}
mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, "early");
resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * device_resume - Execute "resume" callbacks for given device.
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * @dev: Device to resume.
+ * @cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+/**
+ * __device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state)
+static int __device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ dpm_wait(dev->parent, async);
down(&dev->sem);
if (dev->bus) {
@@ -427,7 +515,7 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->resume) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->resume(dev);
+ error = legacy_resume(dev, dev->bus->resume);
}
if (error)
goto End;
@@ -448,16 +536,43 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->resume(dev);
+ error = legacy_resume(dev, dev->class->resume);
}
}
End:
up(&dev->sem);
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ pm_dev_dbg(dev, pm_transition, "async ");
+ error = __device_resume(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+ put_device(dev);
+}
+
+static int device_resume(struct device *dev)
+{
+ INIT_COMPLETION(dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend
+ && !pm_trace_is_enabled()) {
+ get_device(dev);
+ async_schedule(async_resume, dev);
+ return 0;
+ }
+
+ return __device_resume(dev, pm_transition, false);
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -468,9 +583,11 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
@@ -481,7 +598,7 @@ static void dpm_resume(pm_message_t state)
dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state);
+ error = device_resume(dev);
mutex_lock(&dpm_list_mtx);
if (error)
@@ -496,6 +613,8 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, NULL);
}
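device_resume() and dpm_resume() above are the scheduling half of the scheme: devices with async_suspend set are handed to the async framework through async_schedule(), a get_device()/put_device() pair pins each one while its async thread runs, and dpm_resume() calls async_synchronize_full() so it only returns once every scheduled resume has finished. Below is a user-space sketch of that schedule-then-drain pattern, with a pending-work counter standing in for the async framework; all names are illustrative, not kernel API.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-ins for async_schedule()/async_synchronize_full(). */
    static pthread_mutex_t async_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  async_idle = PTHREAD_COND_INITIALIZER;
    static int pending;

    struct work {
        void (*func)(void *data);
        void *data;
    };

    static void *async_thread(void *arg)
    {
        struct work *w = arg;

        w->func(w->data);
        pthread_mutex_lock(&async_lock);
        if (--pending == 0)
            pthread_cond_broadcast(&async_idle);
        pthread_mutex_unlock(&async_lock);
        return NULL;
    }

    static void async_schedule(struct work *w)
    {
        pthread_t t;

        pthread_mutex_lock(&async_lock);
        pending++;
        pthread_mutex_unlock(&async_lock);
        pthread_create(&t, NULL, async_thread, w);
        pthread_detach(t);
    }

    static void async_synchronize_full(void)
    {
        pthread_mutex_lock(&async_lock);
        while (pending)
            pthread_cond_wait(&async_idle, &async_lock);
        pthread_mutex_unlock(&async_lock);
    }

    static void resume_one(void *data)
    {
        printf("resumed %s\n", (const char *)data);
    }

    int main(void)
    {
        struct work a = { resume_one, "deva" };
        struct work b = { resume_one, "devb" };

        async_schedule(&a);          /* like async_schedule(async_resume, dev) */
        async_schedule(&b);
        async_synchronize_full();    /* dpm_resume() waits here before returning */
        return 0;
    }

The suspend side drains the same way at the end of dpm_suspend(), with async_error carrying the first asynchronous failure back to the caller.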
/**
@@ -628,6 +747,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
int dpm_suspend_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
@@ -643,29 +763,58 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));
+ else
+ dpm_show_time(starttime, state, "late");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @cb: Suspend callback to execute.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+ int (*cb)(struct device *dev, pm_message_t state))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev, state);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+static int async_error;
+
+/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state)
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ dpm_wait_for_children(dev, async);
down(&dev->sem);
+ if (async_error)
+ goto End;
+
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->suspend(dev, state);
- suspend_report_result(dev->class->suspend, error);
+ error = legacy_suspend(dev, state, dev->class->suspend);
}
if (error)
goto End;
@@ -686,16 +835,48 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->suspend(dev, state);
- suspend_report_result(dev->bus->suspend, error);
+ error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
+
+ if (!error)
+ dev->power.status = DPM_OFF;
+
End:
up(&dev->sem);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ pm_dev_dbg(dev, pm_transition, "async ");
+ error = __device_suspend(dev, pm_transition, true);
+ if (error) {
+ pm_dev_err(dev, pm_transition, " async", error);
+ async_error = error;
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ INIT_COMPLETION(dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend, dev);
+ return 0;
+ }
+
+ return __device_suspend(dev, pm_transition, false);
+}
+
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -703,17 +884,20 @@ static int device_suspend(struct device *dev, pm_message_t state)
static int dpm_suspend(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
int error = 0;
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state);
+ error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -721,13 +905,19 @@ static int dpm_suspend(pm_message_t state)
put_device(dev);
break;
}
- dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
put_device(dev);
+ if (async_error)
+ break;
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+ if (!error)
+ dpm_show_time(starttime, state, NULL);
return error;
}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..c0bd03c83b9c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
#ifdef CONFIG_PM_SLEEP
-/*
- * main.c
- */
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+/* drivers/base/power/main.c */
extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry)
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..0f75ebb83a79 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -38,6 +38,22 @@
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * If set, the PM core will attempt to suspend and resume the device during
+ * system power transitions (e.g. suspend to RAM, hibernation) in parallel
+ * with other devices on which, as far as the PM core knows, it does not
+ * depend.
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device
+ * + "disabled\n" to forbid it
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers, and in
+ * those cases it should be safe to leave the default value.
*/
static const char enabled[] = "enabled";
@@ -77,9 +93,40 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ device_async_suspend_enabled(dev) ? enabled : disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ device_enable_async_suspend(dev, true);
+ else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ device_enable_async_suspend(dev, false);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(async, 0644, async_show, async_store);
+#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+ &dev_attr_async.attr,
+#endif
NULL,
};
static struct attribute_group pm_attr_group = {
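With the sysfs.c change above, every device built with CONFIG_PM_SLEEP_ADVANCED_DEBUG gains a power/async attribute that accepts the strings "enabled" and "disabled", mirroring device_enable_async_suspend(). A small user-space C program that reads the current setting and then enables async PM; the device path is purely an example and must be replaced with a real device directory.

    #include <stdio.h>

    int main(void)
    {
        /* Example path only -- substitute the sysfs directory of a real device. */
        const char *path = "/sys/devices/pci0000:00/0000:00:1d.0/power/async";
        char current[16] = "";
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(current, sizeof(current), f))
            printf("current setting: %s", current);
        fclose(f);

        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return 1;
        }
        fputs("enabled\n", f);  /* write "disabled\n" to forbid async suspend/resume */
        fclose(f);
        return 0;
    }

Bus code and drivers can flip the same flag from the kernel side through device_enable_async_suspend(), which is what async_store() above does on behalf of user space.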