Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig               |  57
-rw-r--r--  drivers/base/Makefile              |   4
-rw-r--r--  drivers/base/attribute_container.c |   1
-rw-r--r--  drivers/base/bus.c                 |  51
-rw-r--r--  drivers/base/class.c               |  52
-rw-r--r--  drivers/base/core.c                | 426
-rw-r--r--  drivers/base/cpu.c                 | 108
-rw-r--r--  drivers/base/dd.c                  |  46
-rw-r--r--  drivers/base/devres.c              |   1
-rw-r--r--  drivers/base/devtmpfs.c            |  19
-rw-r--r--  drivers/base/dma-coherent.c        |   3
-rw-r--r--  drivers/base/dma-mapping.c         |   1
-rw-r--r--  drivers/base/driver.c              |   1
-rw-r--r--  drivers/base/firmware_class.c      | 430
-rw-r--r--  drivers/base/iommu.c               |  43
-rw-r--r--  drivers/base/memory.c              | 132
-rw-r--r--  drivers/base/module.c              |   5
-rw-r--r--  drivers/base/node.c                | 160
-rw-r--r--  drivers/base/platform.c            | 301
-rw-r--r--  drivers/base/power/Makefile        |   4
-rw-r--r--  drivers/base/power/generic_ops.c   | 233
-rw-r--r--  drivers/base/power/main.c          | 203
-rw-r--r--  drivers/base/power/opp.c           | 628
-rw-r--r--  drivers/base/power/power.h         |   8
-rw-r--r--  drivers/base/power/runtime.c       | 963
-rw-r--r--  drivers/base/power/sysfs.c         | 418
-rw-r--r--  drivers/base/power/trace.c         |  36
-rw-r--r--  drivers/base/power/wakeup.c        | 704
-rw-r--r--  drivers/base/sys.c                 |  26
-rw-r--r--  drivers/base/topology.c            |  18
30 files changed, 3804 insertions, 1278 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ee377270beb9..fd96345bc35c 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -3,35 +3,53 @@ menu "Generic Driver Options"
config UEVENT_HELPER_PATH
string "path to uevent helper"
depends on HOTPLUG
- default "/sbin/hotplug"
+ default ""
help
Path to uevent helper program forked by the kernel for
every uevent.
+ Before the switch to the netlink-based uevent source, this was
+ used to hook hotplug scripts into kernel device events. It
+ usually pointed to a shell script at /sbin/hotplug.
+ This should not be used today, because typical systems create
+ many events at bootup or device discovery in a very short time
+ frame. Forking one process per event can create so many processes
+ that the system load becomes very high, and on smaller systems
+ it is known to cause out-of-memory situations during bootup.
config DEVTMPFS
- bool "Create a kernel maintained /dev tmpfs (EXPERIMENTAL)"
- depends on HOTPLUG && SHMEM && TMPFS
+ bool "Maintain a devtmpfs filesystem to mount at /dev"
+ depends on HOTPLUG
help
- This creates a tmpfs filesystem, and mounts it at bootup
- and mounts it at /dev. The kernel driver core creates device
- nodes for all registered devices in that filesystem. All device
- nodes are owned by root and have the default mode of 0600.
- Userspace can add and delete the nodes as needed. This is
- intended to simplify bootup, and make it possible to delay
- the initial coldplug at bootup done by udev in userspace.
- It should also provide a simpler way for rescue systems
- to bring up a kernel with dynamic major/minor numbers.
- Meaningful symlinks, permissions and device ownership must
- still be handled by userspace.
- If unsure, say N here.
+ This creates a tmpfs/ramfs filesystem instance early at bootup.
+ In this filesystem, the kernel driver core maintains device
+ nodes with their default names and permissions for all
+ registered devices with an assigned major/minor number.
+ Userspace can modify the filesystem content as needed, add
+ symlinks, and apply needed permissions.
+ It provides a fully functional /dev directory, where usually
+ udev runs on top, managing permissions and adding meaningful
+ symlinks.
+ In very limited environments, it may provide a sufficiently
+ functional /dev without any further help. It also allows simple
+ rescue systems, and reliably handles dynamic major/minor numbers.
+
+ Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+ file system will be used instead.
config DEVTMPFS_MOUNT
- bool "Automount devtmpfs at /dev"
+ bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
depends on DEVTMPFS
help
- This will mount devtmpfs at /dev if the kernel mounts the root
- filesystem. It will not affect initramfs based mounting.
- If unsure, say N here.
+ This will instruct the kernel to automatically mount the
+ devtmpfs filesystem at /dev, directly after the kernel has
+ mounted the root filesystem. The behavior can be overridden
+ with the commandline parameter: devtmpfs.mount=0|1.
+ This option does not affect initramfs based booting; there
+ the devtmpfs filesystem always needs to be mounted manually
+ after the rootfs is mounted.
+ With this option enabled, a system can be brought up in
+ rescue mode with init=/bin/sh, even when the /dev directory
+ on the rootfs is completely empty.
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
@@ -53,7 +71,6 @@ config PREVENT_FIRMWARE_BUILD
config FW_LOADER
tristate "Userspace firmware loading support" if EMBEDDED
- depends on HOTPLUG
default y
---help---
This option is provided for the case where no in-kernel-tree modules
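Aside: the UEVENT_HELPER_PATH help text above refers to the netlink-based uevent source that replaced the forked /sbin/hotplug helper. A minimal userspace listener sketch, assuming a Linux host where NETLINK_KOBJECT_UEVENT is available (multicast group 1 is the kernel's broadcast group); error handling is abbreviated:

/* Minimal uevent listener sketch: subscribes to the kernel's netlink
 * uevent broadcast instead of relying on a forked helper process. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_pid    = getpid(),
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		/* each datagram is "ACTION@DEVPATH\0KEY=VALUE\0..." */
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		if (len <= 0)
			break;
		buf[len] = '\0';
		printf("%s\n", buf);	/* prints ACTION@DEVPATH */
	}
	return 0;
}

One long-lived listener like this handles an event storm at boot without the per-event fork cost the help text warns about.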
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index c12c7f2f2a6f..5f51c3b4451e 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -19,7 +19,5 @@ obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
-ifeq ($(CONFIG_DEBUG_DRIVER),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index b9cda053d3c0..8fc200b2e2c0 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -328,6 +328,7 @@ attribute_container_add_attrs(struct device *classdev)
return sysfs_create_group(&classdev->kobj, cont->grp);
for (i = 0; attrs[i]; i++) {
+ sysfs_attr_init(&attrs[i]->attr);
error = device_create_file(classdev, attrs[i]);
if (error)
return error;
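The sysfs_attr_init() call added above gives a dynamically allocated attribute a valid lockdep key; without it, lockdep-enabled kernels warn when the attribute is registered. A sketch of the same pattern for a driver-private attribute (names here are illustrative, not from this patch):

/* Sketch: dynamically allocated attributes need sysfs_attr_init()
 * before registration so lockdep gets a static key. */
struct device_attribute *attr;

attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
	return -ENOMEM;

sysfs_attr_init(&attr->attr);		/* lockdep key init */
attr->attr.name = "example";		/* hypothetical name */
attr->attr.mode = 0444;
attr->show = example_show;		/* hypothetical show() helper */

return device_create_file(dev, attr);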
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index c0c5a43d9fb3..33c270a64db7 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include "base.h"
@@ -70,7 +71,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static struct sysfs_ops driver_sysfs_ops = {
+static const struct sysfs_ops driver_sysfs_ops = {
.show = drv_attr_show,
.store = drv_attr_store,
};
@@ -115,7 +116,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static struct sysfs_ops bus_sysfs_ops = {
+static const struct sysfs_ops bus_sysfs_ops = {
.show = bus_attr_show,
.store = bus_attr_store,
};
@@ -154,7 +155,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
-static struct kset_uevent_ops bus_uevent_ops = {
+static const struct kset_uevent_ops bus_uevent_ops = {
.filter = bus_uevent_filter,
};
@@ -173,10 +174,10 @@ static ssize_t driver_unbind(struct device_driver *drv,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == drv) {
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
+ device_lock(dev->parent);
device_release_driver(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
err = count;
}
put_device(dev);
@@ -200,12 +201,12 @@ static ssize_t driver_bind(struct device_driver *drv,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
- down(&dev->sem);
+ device_lock(dev->parent);
+ device_lock(dev);
err = driver_probe_device(drv, dev);
- up(&dev->sem);
+ device_unlock(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
if (err > 0) {
/* success */
@@ -439,22 +440,6 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev)
}
}
-#ifdef CONFIG_SYSFS_DEPRECATED
-static int make_deprecated_bus_links(struct device *dev)
-{
- return sysfs_create_link(&dev->kobj,
- &dev->bus->p->subsys.kobj, "bus");
-}
-
-static void remove_deprecated_bus_links(struct device *dev)
-{
- sysfs_remove_link(&dev->kobj, "bus");
-}
-#else
-static inline int make_deprecated_bus_links(struct device *dev) { return 0; }
-static inline void remove_deprecated_bus_links(struct device *dev) { }
-#endif
-
/**
* bus_add_device - add device to bus
* @dev: device being added
@@ -481,15 +466,10 @@ int bus_add_device(struct device *dev)
&dev->bus->p->subsys.kobj, "subsystem");
if (error)
goto out_subsys;
- error = make_deprecated_bus_links(dev);
- if (error)
- goto out_deprecated;
klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
}
return 0;
-out_deprecated:
- sysfs_remove_link(&dev->kobj, "subsystem");
out_subsys:
sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
out_id:
@@ -529,7 +509,6 @@ void bus_remove_device(struct device *dev)
{
if (dev->bus) {
sysfs_remove_link(&dev->kobj, "subsystem");
- remove_deprecated_bus_links(dev);
sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
dev_name(dev));
device_remove_attrs(dev->bus, dev);
@@ -744,10 +723,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
if (!dev->driver) {
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
+ device_lock(dev->parent);
ret = device_attach(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
}
return ret < 0 ? ret : 0;
}
@@ -779,10 +758,10 @@ int device_reprobe(struct device *dev)
{
if (dev->driver) {
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
+ device_lock(dev->parent);
device_release_driver(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
}
return bus_rescan_devices_helper(dev, NULL);
}
@@ -944,8 +923,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&bus->p->subsys);
- kfree(bus->p);
out:
+ kfree(bus->p);
bus->p = NULL;
return retval;
}
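This file converts the old down(&dev->sem)/up(&dev->sem) pairs to the device_lock()/device_unlock() helpers wrapping the new dev->mutex. For reference, the wrappers are essentially the following (a sketch of the include/linux/device.h helpers of this era):

/* Sketch of the helpers this patch switches to; the real definitions
 * live in include/linux/device.h. */
static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);
}

static inline int device_trylock(struct device *dev)
{
	return mutex_trylock(&dev->mutex);
}

static inline void device_unlock(struct device *dev)
{
	mutex_unlock(&dev->mutex);
}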
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..9c63a5687d69 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -31,7 +31,7 @@ static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
ssize_t ret = -EIO;
if (class_attr->show)
- ret = class_attr->show(cp->class, buf);
+ ret = class_attr->show(cp->class, class_attr, buf);
return ret;
}
@@ -43,7 +43,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
ssize_t ret = -EIO;
if (class_attr->store)
- ret = class_attr->store(cp->class, buf, count);
+ ret = class_attr->store(cp->class, class_attr, buf, count);
return ret;
}
@@ -59,9 +59,19 @@ static void class_release(struct kobject *kobj)
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
+
+ kfree(cp);
+}
+
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+ struct class_private *cp = to_class(kobj);
+ struct class *class = cp->class;
+
+ return class->ns_type;
}
-static struct sysfs_ops class_sysfs_ops = {
+static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
};
@@ -69,6 +79,7 @@ static struct sysfs_ops class_sysfs_ops = {
static struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
+ .child_ns_type = class_child_ns_type,
};
/* Hotplug events for classes go to the class class_subsys */
@@ -173,9 +184,9 @@ int __class_register(struct class *cls, struct lock_class_key *key)
if (!cls->dev_kobj)
cls->dev_kobj = sysfs_dev_char_kobj;
-#if defined(CONFIG_SYSFS_DEPRECATED) && defined(CONFIG_BLOCK)
+#if defined(CONFIG_BLOCK)
/* let the block class directory show up in the root of sysfs */
- if (cls != &block_class)
+ if (!sysfs_deprecated || cls != &block_class)
cp->class_subsys.kobj.kset = class_kset;
#else
cp->class_subsys.kobj.kset = class_kset;
@@ -217,6 +228,8 @@ static void class_create_release(struct class *cls)
* This is used to create a struct class pointer that can then be used
* in calls to device_create().
*
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
* Note, the pointer created here is to be destroyed when finished by
* making a call to class_destroy().
*/
@@ -263,25 +276,6 @@ void class_destroy(struct class *cls)
class_unregister(cls);
}
-#ifdef CONFIG_SYSFS_DEPRECATED
-char *make_class_name(const char *name, struct kobject *kobj)
-{
- char *class_name;
- int size;
-
- size = strlen(name) + strlen(kobject_name(kobj)) + 2;
-
- class_name = kmalloc(size, GFP_KERNEL);
- if (!class_name)
- return NULL;
-
- strcpy(class_name, name);
- strcat(class_name, ":");
- strcat(class_name, kobject_name(kobj));
- return class_name;
-}
-#endif
-
/**
* class_dev_iter_init - initialize class device iterator
* @iter: class iterator to initialize
@@ -488,6 +482,16 @@ void class_interface_unregister(struct class_interface *class_intf)
class_put(parent);
}
+ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ struct class_attribute_string *cs;
+ cs = container_of(attr, struct class_attribute_string, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
+}
+
+EXPORT_SYMBOL_GPL(show_class_attr_string);
+
struct class_compat {
struct kobject *kobj;
};
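show_class_attr_string(), exported above, is the common show() routine for string-valued class attributes. A short usage sketch with the matching CLASS_ATTR_STRING() helper; the attribute name, value, and class pointer are illustrative:

/* Sketch: a read-only class attribute that reports a fixed string.
 * CLASS_ATTR_STRING() wraps show_class_attr_string() from above. */
static CLASS_ATTR_STRING(version, 0444, "1.0");	/* illustrative value */

static int __init example_init(void)
{
	/* example_class is assumed to be an already registered class */
	return class_create_file(example_class, &class_attr_version.attr);
}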
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 282025770429..6ed645411c40 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -20,13 +20,25 @@
#include <linux/notifier.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include "base.h"
#include "power/power.h"
+#ifdef CONFIG_SYSFS_DEPRECATED
+#ifdef CONFIG_SYSFS_DEPRECATED_V2
+long sysfs_deprecated = 1;
+#else
+long sysfs_deprecated = 0;
+#endif
+static __init int sysfs_deprecated_setup(char *arg)
+{
+ return strict_strtol(arg, 10, &sysfs_deprecated);
+}
+early_param("sysfs.deprecated", sysfs_deprecated_setup);
+#endif
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@@ -100,7 +112,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static struct sysfs_ops dev_sysfs_ops = {
+static const struct sysfs_ops dev_sysfs_ops = {
.show = dev_attr_show,
.store = dev_attr_store,
};
@@ -132,9 +144,21 @@ static void device_release(struct kobject *kobj)
kfree(p);
}
+static const void *device_namespace(struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
+ .namespace = device_namespace,
};
@@ -192,37 +216,6 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
-#ifdef CONFIG_SYSFS_DEPRECATED
- if (dev->class) {
- struct device *parent = dev->parent;
-
- /* find first bus device in parent chain */
- while (parent && !parent->bus)
- parent = parent->parent;
- if (parent && parent->bus) {
- const char *path;
-
- path = kobject_get_path(&parent->kobj, GFP_KERNEL);
- if (path) {
- add_uevent_var(env, "PHYSDEVPATH=%s", path);
- kfree(path);
- }
-
- add_uevent_var(env, "PHYSDEVBUS=%s", parent->bus->name);
-
- if (parent->driver)
- add_uevent_var(env, "PHYSDEVDRIVER=%s",
- parent->driver->name);
- }
- } else if (dev->bus) {
- add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
-
- if (dev->driver)
- add_uevent_var(env, "PHYSDEVDRIVER=%s",
- dev->driver->name);
- }
-#endif
-
/* have the bus specific function add its stuff */
if (dev->bus && dev->bus->uevent) {
retval = dev->bus->uevent(dev, env);
@@ -240,7 +233,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
__func__, retval);
}
- /* have the device type specific fuction add its stuff */
+ /* have the device type specific function add its stuff */
if (dev->type && dev->type->uevent) {
retval = dev->type->uevent(dev, env);
if (retval)
@@ -252,7 +245,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
return retval;
}
-static struct kset_uevent_ops device_uevent_ops = {
+static const struct kset_uevent_ops device_uevent_ops = {
.filter = dev_uevent_filter,
.name = dev_uevent_name,
.uevent = dev_uevent,
@@ -306,15 +299,10 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
{
enum kobject_action action;
- if (kobject_action_type(buf, count, &action) == 0) {
+ if (kobject_action_type(buf, count, &action) == 0)
kobject_uevent(&dev->kobj, action);
- goto out;
- }
-
- dev_err(dev, "uevent: unsupported action-string; this will "
- "be ignored in a future kernel version\n");
- kobject_uevent(&dev->kobj, KOBJ_ADD);
-out:
+ else
+ dev_err(dev, "uevent: unknown action-string\n");
return count;
}
@@ -564,32 +552,14 @@ void device_initialize(struct device *dev)
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mutex);
+ lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
- device_init_wakeup(dev, 0);
device_pm_init(dev);
set_dev_node(dev, -1);
}
-#ifdef CONFIG_SYSFS_DEPRECATED
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
-{
- /* class devices without a parent live in /sys/class/<classname>/ */
- if (dev->class && (!parent || parent->class != dev->class))
- return &dev->class->p->class_subsys.kobj;
- /* all other devices keep their parent */
- else if (parent)
- return &parent->kobj;
-
- return NULL;
-}
-
-static inline void cleanup_device_parent(struct device *dev) {}
-static inline void cleanup_glue_dir(struct device *dev,
- struct kobject *glue_dir) {}
-#else
static struct kobject *virtual_device_parent(struct device *dev)
{
static struct kobject *virtual_dir = NULL;
@@ -601,16 +571,74 @@ static struct kobject *virtual_device_parent(struct device *dev)
return virtual_dir;
}
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
{
+ struct class_dir *dir = to_class_dir(kobj);
+ kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+ .release = class_dir_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+ struct class_dir *dir;
int retval;
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+
+ dir->kobj.kset = &class->p->class_dirs;
+
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+ return NULL;
+ }
+ return &dir->kobj;
+}
+
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
if (dev->class) {
+ static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
struct kobject *parent_kobj;
struct kobject *k;
+#ifdef CONFIG_BLOCK
+ /* block disks show up in /sys/block */
+ if (sysfs_deprecated && dev->class == &block_class) {
+ if (parent && parent->class == &block_class)
+ return &parent->kobj;
+ return &block_class.p->class_subsys.kobj;
+ }
+#endif
+
/*
* If we have no parent, we live in "virtual".
* Class-devices with a non class-device as parent, live
@@ -618,11 +646,13 @@ static struct kobject *get_device_parent(struct device *dev,
*/
if (parent == NULL)
parent_kobj = virtual_device_parent(dev);
- else if (parent->class)
+ else if (parent->class && !dev->class->ns_type)
return &parent->kobj;
else
parent_kobj = &parent->kobj;
+ mutex_lock(&gdp_mutex);
+
/* find our class-directory at the parent and reference it */
spin_lock(&dev->class->p->class_dirs.list_lock);
list_for_each_entry(k, &dev->class->p->class_dirs.list, entry)
@@ -631,20 +661,15 @@ static struct kobject *get_device_parent(struct device *dev,
break;
}
spin_unlock(&dev->class->p->class_dirs.list_lock);
- if (kobj)
+ if (kobj) {
+ mutex_unlock(&gdp_mutex);
return kobj;
+ }
/* or create a new class-directory at the parent device */
- k = kobject_create();
- if (!k)
- return NULL;
- k->kset = &dev->class->p->class_dirs;
- retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
- if (retval < 0) {
- kobject_put(k);
- return NULL;
- }
+ k = class_dir_create_and_add(dev->class, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
+ mutex_unlock(&gdp_mutex);
return k;
}
@@ -667,7 +692,6 @@ static void cleanup_device_parent(struct device *dev)
{
cleanup_glue_dir(dev, dev->kobj.parent);
}
-#endif
static void setup_parent(struct device *dev, struct device *parent)
{
@@ -690,70 +714,29 @@ static int device_add_class_symlinks(struct device *dev)
if (error)
goto out;
-#ifdef CONFIG_SYSFS_DEPRECATED
- /* stacked class devices need a symlink in the class directory */
- if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
- device_is_not_partition(dev)) {
- error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, dev_name(dev));
- if (error)
- goto out_subsys;
- }
-
if (dev->parent && device_is_not_partition(dev)) {
- struct device *parent = dev->parent;
- char *class_name;
-
- /*
- * stacked class devices have the 'device' link
- * pointing to the bus device instead of the parent
- */
- while (parent->class && !parent->bus && parent->parent)
- parent = parent->parent;
-
- error = sysfs_create_link(&dev->kobj,
- &parent->kobj,
+ error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
"device");
if (error)
- goto out_busid;
-
- class_name = make_class_name(dev->class->name,
- &dev->kobj);
- if (class_name)
- error = sysfs_create_link(&dev->parent->kobj,
- &dev->kobj, class_name);
- kfree(class_name);
- if (error)
- goto out_device;
+ goto out_subsys;
}
- return 0;
-out_device:
- if (dev->parent && device_is_not_partition(dev))
- sysfs_remove_link(&dev->kobj, "device");
-out_busid:
- if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
- device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
- dev_name(dev));
-#else
+#ifdef CONFIG_BLOCK
+ /* /sys/block has directories and does not need symlinks */
+ if (sysfs_deprecated && dev->class == &block_class)
+ return 0;
+#endif
+
/* link in the class directory pointing to the device */
error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
&dev->kobj, dev_name(dev));
if (error)
- goto out_subsys;
+ goto out_device;
- if (dev->parent && device_is_not_partition(dev)) {
- error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
- "device");
- if (error)
- goto out_busid;
- }
return 0;
-out_busid:
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
-#endif
+out_device:
+ sysfs_remove_link(&dev->kobj, "device");
out_subsys:
sysfs_remove_link(&dev->kobj, "subsystem");
@@ -766,30 +749,14 @@ static void device_remove_class_symlinks(struct device *dev)
if (!dev->class)
return;
-#ifdef CONFIG_SYSFS_DEPRECATED
- if (dev->parent && device_is_not_partition(dev)) {
- char *class_name;
-
- class_name = make_class_name(dev->class->name, &dev->kobj);
- if (class_name) {
- sysfs_remove_link(&dev->parent->kobj, class_name);
- kfree(class_name);
- }
- sysfs_remove_link(&dev->kobj, "device");
- }
-
- if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
- device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
- dev_name(dev));
-#else
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
-
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
-#endif
-
sysfs_remove_link(&dev->kobj, "subsystem");
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ return;
+#endif
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
}
/**
@@ -1341,6 +1308,8 @@ static void root_device_release(struct device *dev)
* 'module' symlink which points to the @owner directory
* in sysfs.
*
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
* Note: You probably want to use root_device_register().
*/
struct device *__root_device_register(const char *name, struct module *owner)
@@ -1366,7 +1335,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
return ERR_PTR(err);
}
-#ifdef CONFIG_MODULE /* gotta find a "cleaner" way to do this */
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
@@ -1428,6 +1397,8 @@ static void device_create_release(struct device *dev)
* Any further sysfs files that might be required can be created using this
* pointer.
*
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
* Note: the struct class passed to this function must have previously
* been created with a call to class_create().
*/
@@ -1488,6 +1459,8 @@ EXPORT_SYMBOL_GPL(device_create_vargs);
* Any further sysfs files that might be required can be created using this
* pointer.
*
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
* Note: the struct class passed to this function must have previously
* been created with a call to class_create().
*/
@@ -1541,7 +1514,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
*/
-int device_rename(struct device *dev, char *new_name)
+int device_rename(struct device *dev, const char *new_name)
{
char *old_class_name = NULL;
char *new_class_name = NULL;
@@ -1555,43 +1528,22 @@ int device_rename(struct device *dev, char *new_name)
pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
__func__, new_name);
-#ifdef CONFIG_SYSFS_DEPRECATED
- if ((dev->class) && (dev->parent))
- old_class_name = make_class_name(dev->class->name, &dev->kobj);
-#endif
-
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
error = -ENOMEM;
goto out;
}
- error = kobject_rename(&dev->kobj, new_name);
- if (error)
- goto out;
-
-#ifdef CONFIG_SYSFS_DEPRECATED
- if (old_class_name) {
- new_class_name = make_class_name(dev->class->name, &dev->kobj);
- if (new_class_name) {
- error = sysfs_create_link_nowarn(&dev->parent->kobj,
- &dev->kobj,
- new_class_name);
- if (error)
- goto out;
- sysfs_remove_link(&dev->parent->kobj, old_class_name);
- }
- }
-#else
if (dev->class) {
- error = sysfs_create_link_nowarn(&dev->class->p->class_subsys.kobj,
- &dev->kobj, dev_name(dev));
+ error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, old_device_name, new_name);
if (error)
goto out;
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
- old_device_name);
}
-#endif
+
+ error = kobject_rename(&dev->kobj, new_name);
+ if (error)
+ goto out;
out:
put_device(dev);
@@ -1609,40 +1561,13 @@ static int device_move_class_links(struct device *dev,
struct device *new_parent)
{
int error = 0;
-#ifdef CONFIG_SYSFS_DEPRECATED
- char *class_name;
- class_name = make_class_name(dev->class->name, &dev->kobj);
- if (!class_name) {
- error = -ENOMEM;
- goto out;
- }
- if (old_parent) {
- sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&old_parent->kobj, class_name);
- }
- if (new_parent) {
- error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
- "device");
- if (error)
- goto out;
- error = sysfs_create_link(&new_parent->kobj, &dev->kobj,
- class_name);
- if (error)
- sysfs_remove_link(&dev->kobj, "device");
- } else
- error = 0;
-out:
- kfree(class_name);
- return error;
-#else
if (old_parent)
sysfs_remove_link(&dev->kobj, "device");
if (new_parent)
error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
"device");
return error;
-#endif
}
/**
@@ -1731,10 +1656,25 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev, *devn;
+ struct device *dev;
+
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+ * Beware that device unplug events may also start pulling
+ * devices offline, even as the system is shutting down.
+ */
+ while (!list_empty(&devices_kset->list)) {
+ dev = list_entry(devices_kset->list.prev, struct device,
+ kobj.entry);
+ get_device(dev);
+ /*
+ * Make sure the device is off the kset list, in the
+ * event that dev->*->shutdown() doesn't remove it.
+ */
+ list_del_init(&dev->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
- list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
- kobj.entry) {
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
dev->bus->shutdown(dev);
@@ -1742,6 +1682,74 @@ void device_shutdown(void)
dev_dbg(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
+ put_device(dev);
+
+ spin_lock(&devices_kset->list_lock);
}
+ spin_unlock(&devices_kset->list_lock);
async_synchronize_full();
}
+
+/*
+ * Device logging functions
+ */
+
+#ifdef CONFIG_PRINTK
+
+static int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{
+ if (!dev)
+ return printk("%s(NULL device *): %pV", level, vaf);
+
+ return printk("%s%s %s: %pV",
+ level, dev_driver_string(dev), dev_name(dev), vaf);
+}
+
+int dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = __dev_printk(level, dev, &vaf);
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(dev_printk);
+
+#define define_dev_printk_level(func, kern_level) \
+int func(const struct device *dev, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ int r; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ r = __dev_printk(kern_level, dev, &vaf); \
+ va_end(args); \
+ \
+ return r; \
+} \
+EXPORT_SYMBOL(func);
+
+define_dev_printk_level(dev_emerg, KERN_EMERG);
+define_dev_printk_level(dev_alert, KERN_ALERT);
+define_dev_printk_level(dev_crit, KERN_CRIT);
+define_dev_printk_level(dev_err, KERN_ERR);
+define_dev_printk_level(dev_warn, KERN_WARNING);
+define_dev_printk_level(dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_info, KERN_INFO);
+
+#endif
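The new dev_printk()/dev_* helpers funnel every message through a single __dev_printk() using the %pV format, which forwards a format-plus-va_list pair through one printk() call without an intermediate buffer. Typical call sites look like the sketch below (message text is illustrative):

/* Sketch: the generated dev_* helpers prefix messages with
 * "<driver> <device>: " automatically. */
static int example_probe(struct device *dev)
{
	dev_info(dev, "probing (illustrative message)\n");

	if (!dev->parent)
		dev_warn(dev, "no parent device\n");

	return 0;
}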
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 958bd1540c30..251acea3d359 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -10,11 +10,15 @@
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
+#include <linux/gfp.h>
#include "base.h"
+static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];
+
struct sysdev_class cpu_sysdev_class = {
.name = "cpu",
+ .attrs = cpu_sysdev_class_attrs,
};
EXPORT_SYMBOL(cpu_sysdev_class);
@@ -76,34 +80,24 @@ void unregister_cpu(struct cpu *cpu)
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct class *class, const char *buf,
+static ssize_t cpu_probe_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf,
size_t count)
{
return arch_cpu_probe(buf, count);
}
-static ssize_t cpu_release_store(struct class *class, const char *buf,
+static ssize_t cpu_release_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf,
size_t count)
{
return arch_cpu_release(buf, count);
}
-static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
-
-int __init cpu_probe_release_init(void)
-{
- int rc;
-
- rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &class_attr_probe.attr);
- if (!rc)
- rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &class_attr_release.attr);
-
- return rc;
-}
-device_initcall(cpu_probe_release_init);
+static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -141,31 +135,39 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
/*
* Print cpu online, possible, present, and system maps
*/
-static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
+
+struct cpu_attr {
+ struct sysdev_class_attribute attr;
+ const struct cpumask *const * const map;
+};
+
+static ssize_t show_cpus_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ char *buf)
{
- int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
+ struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
+ int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
buf[n++] = '\n';
buf[n] = '\0';
return n;
}
-#define print_cpus_func(type) \
-static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
-{ \
- return print_cpus_map(buf, cpu_##type##_mask); \
-} \
-static struct sysdev_class_attribute attr_##type##_map = \
- _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
+#define _CPU_ATTR(name, map) \
+ { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }
-print_cpus_func(online);
-print_cpus_func(possible);
-print_cpus_func(present);
+/* Keep in sync with cpu_sysdev_class_attrs */
+static struct cpu_attr cpu_attrs[] = {
+ _CPU_ATTR(online, &cpu_online_mask),
+ _CPU_ATTR(possible, &cpu_possible_mask),
+ _CPU_ATTR(present, &cpu_present_mask),
+};
/*
* Print values for NR_CPUS and offlined cpus
*/
-static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf)
+static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
{
int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
return n;
@@ -175,7 +177,8 @@ static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;
-static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
+static ssize_t print_cpus_offline(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t offline;
@@ -183,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);
@@ -204,29 +207,6 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
}
static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
-static struct sysdev_class_attribute *cpu_state_attr[] = {
- &attr_online_map,
- &attr_possible_map,
- &attr_present_map,
- &attr_kernel_max,
- &attr_offline,
-};
-
-static int cpu_states_init(void)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < ARRAY_SIZE(cpu_state_attr); i++) {
- int ret;
- ret = sysdev_class_create_file(&cpu_sysdev_class,
- cpu_state_attr[i]);
- if (!err)
- err = ret;
- }
- return err;
-}
-
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -272,9 +252,6 @@ int __init cpu_dev_init(void)
int err;
err = sysdev_class_register(&cpu_sysdev_class);
- if (!err)
- err = cpu_states_init();
-
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
if (!err)
err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
@@ -282,3 +259,16 @@ int __init cpu_dev_init(void)
return err;
}
+
+static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ &attr_probe,
+ &attr_release,
+#endif
+ &cpu_attrs[0].attr,
+ &cpu_attrs[1].attr,
+ &cpu_attrs[2].attr,
+ &attr_kernel_max,
+ &attr_offline,
+ NULL
+};
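The cpu_attrs[] conversion above uses the standard trick for attaching data to an attribute: embed the generic attribute in a wrapper struct and recover the wrapper with container_of() in the show() method. Reduced to its core (the wrapper type and payload are illustrative):

/* Sketch of the embed-and-container_of pattern used by cpu_attrs[]. */
struct wrapped_attr {
	struct sysdev_class_attribute attr;	/* generic part, registered */
	const void *payload;			/* private data */
};

static ssize_t wrapped_show(struct sysdev_class *class,
			    struct sysdev_class_attribute *attr, char *buf)
{
	struct wrapped_attr *wa = container_of(attr, struct wrapped_attr, attr);

	/* wa->payload is reachable without any global lookup table */
	return snprintf(buf, PAGE_SIZE, "%p\n", wa->payload);
}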
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ee95c76bfd3d..da57ee9d63fe 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,17 +40,21 @@ static void driver_bound(struct device *dev)
pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
__func__, dev->driver->name);
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
-
- klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
}
static int driver_sysfs_add(struct device *dev)
{
int ret;
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_BIND_DRIVER, dev);
+
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
kobject_name(&dev->kobj));
if (ret == 0) {
@@ -85,7 +89,7 @@ static void driver_sysfs_remove(struct device *dev)
* for before calling this. (It is ok to call with no other effort
* from a driver's probe() method.)
*
- * This function must be called with @dev->sem held.
+ * This function must be called with the device lock held.
*/
int device_bind_driver(struct device *dev)
{
@@ -190,8 +194,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
* This function returns -ENODEV if the device is not registered,
* 1 if the device is bound successfully and 0 otherwise.
*
- * This function must be called with @dev->sem held. When called for a
- * USB interface, @dev->parent->sem must be held as well.
+ * This function must be called with @dev lock held. When called for a
+ * USB interface, @dev->parent lock must be held as well.
*/
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
@@ -233,13 +237,13 @@ static int __device_attach(struct device_driver *drv, void *data)
* 0 if no matching driver was found;
* -ENODEV if the device is not registered.
*
- * When called for a USB interface, @dev->parent->sem must be held.
+ * When called for a USB interface, @dev->parent lock must be held.
*/
int device_attach(struct device *dev)
{
int ret = 0;
- down(&dev->sem);
+ device_lock(dev);
if (dev->driver) {
ret = device_bind_driver(dev);
if (ret == 0)
@@ -253,7 +257,7 @@ int device_attach(struct device *dev)
ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
pm_runtime_put_sync(dev);
}
- up(&dev->sem);
+ device_unlock(dev);
return ret;
}
EXPORT_SYMBOL_GPL(device_attach);
@@ -276,13 +280,13 @@ static int __driver_attach(struct device *dev, void *data)
return 0;
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
- down(&dev->sem);
+ device_lock(dev->parent);
+ device_lock(dev);
if (!dev->driver)
driver_probe_device(drv, dev);
- up(&dev->sem);
+ device_unlock(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
return 0;
}
@@ -303,8 +307,8 @@ int driver_attach(struct device_driver *drv)
EXPORT_SYMBOL_GPL(driver_attach);
/*
- * __device_release_driver() must be called with @dev->sem held.
- * When called for a USB interface, @dev->parent->sem must be held as well.
+ * __device_release_driver() must be called with @dev lock held.
+ * When called for a USB interface, @dev->parent lock must be held as well.
*/
static void __device_release_driver(struct device *dev)
{
@@ -343,7 +347,7 @@ static void __device_release_driver(struct device *dev)
* @dev: device.
*
* Manually detach device from driver.
- * When called for a USB interface, @dev->parent->sem must be held.
+ * When called for a USB interface, @dev->parent lock must be held.
*/
void device_release_driver(struct device *dev)
{
@@ -352,9 +356,9 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- down(&dev->sem);
+ device_lock(dev);
__device_release_driver(dev);
- up(&dev->sem);
+ device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -381,13 +385,13 @@ void driver_detach(struct device_driver *drv)
spin_unlock(&drv->p->klist_devices.k_lock);
if (dev->parent) /* Needed for USB */
- down(&dev->parent->sem);
- down(&dev->sem);
+ device_lock(dev->parent);
+ device_lock(dev);
if (dev->driver == drv)
__device_release_driver(dev);
- up(&dev->sem);
+ device_unlock(dev);
if (dev->parent)
- up(&dev->parent->sem);
+ device_unlock(dev->parent);
put_device(dev);
}
}
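driver_sysfs_add() now raises BUS_NOTIFY_BIND_DRIVER before probe, complementing the BUS_NOTIFY_BOUND_DRIVER event sent after a successful bind. A hedged sketch of a subsystem listening for these events; the bus chosen and the handler body are illustrative:

/* Sketch: subscribing to bind/bound events on a bus. */
static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:	/* about to call probe() */
		dev_dbg(dev, "binding to %s\n", dev->driver->name);
		break;
	case BUS_NOTIFY_BOUND_DRIVER:	/* probe() succeeded */
		dev_dbg(dev, "bound\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_bus_notify,
};

/* registered once during init, e.g. for the platform bus:
 *	bus_register_notifier(&platform_bus_type, &example_nb);
 */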
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 05dd307e8f02..cf7a0c788052 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "base.h"
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 42ae452b36b0..af0600143d1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -20,9 +20,11 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/init_task.h>
+#include <linux/slab.h>
static struct vfsmount *dev_mnt;
@@ -44,7 +46,11 @@ __setup("devtmpfs.mount=", mount_param);
static int dev_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
+#ifdef CONFIG_TMPFS
return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+#else
+ return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+#endif
}
static struct file_system_type dev_fs_type = {
@@ -301,6 +307,19 @@ int devtmpfs_delete_node(struct device *dev)
if (dentry->d_inode) {
err = vfs_getattr(nd.path.mnt, dentry, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = 0;
+ newattrs.ia_gid = 0;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
err = vfs_unlink(nd.path.dentry->d_inode,
dentry);
if (!err || err == -ENOENT)
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 962a3b574f21..f369e2795985 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -2,12 +2,13 @@
* Coherent per-device memory handling.
* Borrowed from i386
*/
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
void *virt_base;
- u32 device_base;
+ dma_addr_t device_base;
int size;
int flags;
unsigned long *bitmap;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index ca9186f70a69..763d59c1eb65 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -8,6 +8,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
/*
* Managed DMA API
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 90c9fff09ead..b631f7c59453 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include "base.h"
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index a95024166b66..40af43ebd92d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -19,7 +19,7 @@
#include <linux/kthread.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
-#include "base.h"
+#include <linux/slab.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
@@ -27,6 +27,52 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ return false;
+}
+#endif
+
enum {
FW_STATUS_LOADING,
FW_STATUS_DONE,
@@ -40,36 +86,33 @@ static int loading_timeout = 60; /* In seconds */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
- char *fw_id;
struct completion completion;
- struct bin_attribute attr_data;
struct firmware *fw;
unsigned long status;
struct page **pages;
int nr_pages;
int page_array_size;
- const char *vdata;
struct timer_list timeout;
+ struct device dev;
+ bool nowait;
+ char fw_id[];
};
-#ifdef CONFIG_FW_LOADER
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-#else /* Module case. Avoid ifdefs later; it'll all optimise out */
-static struct builtin_fw *__start_builtin_fw;
-static struct builtin_fw *__end_builtin_fw;
-#endif
+static struct firmware_priv *to_firmware_priv(struct device *dev)
+{
+ return container_of(dev, struct firmware_priv, dev);
+}
-static void
-fw_load_abort(struct firmware_priv *fw_priv)
+static void fw_load_abort(struct firmware_priv *fw_priv)
{
set_bit(FW_STATUS_ABORT, &fw_priv->status);
wmb();
complete(&fw_priv->completion);
}
-static ssize_t
-firmware_timeout_show(struct class *class, char *buf)
+static ssize_t firmware_timeout_show(struct class *class,
+ struct class_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", loading_timeout);
}
@@ -77,6 +120,7 @@ firmware_timeout_show(struct class *class, char *buf)
/**
* firmware_timeout_store - set number of seconds to wait for firmware
* @class: device class pointer
+ * @attr: device attribute pointer
* @buf: buffer to scan for timeout value
* @count: number of bytes in @buf
*
@@ -86,33 +130,53 @@ firmware_timeout_show(struct class *class, char *buf)
*
* Note: zero means 'wait forever'.
**/
-static ssize_t
-firmware_timeout_store(struct class *class, const char *buf, size_t count)
+static ssize_t firmware_timeout_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
if (loading_timeout < 0)
loading_timeout = 0;
+
return count;
}
-static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
+static struct class_attribute firmware_class_attrs[] = {
+ __ATTR(timeout, S_IWUSR | S_IRUGO,
+ firmware_timeout_show, firmware_timeout_store),
+ __ATTR_NULL
+};
+
+static void fw_dev_release(struct device *dev)
+{
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ int i;
-static void fw_dev_release(struct device *dev);
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ kfree(fw_priv);
+
+ module_put(THIS_MODULE);
+}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+ return -ENOMEM;
return 0;
}
static struct class firmware_class = {
.name = "firmware",
+ .class_attrs = firmware_class_attrs,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -120,11 +184,23 @@ static struct class firmware_class = {
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
+
return sprintf(buf, "%d\n", loading);
}
+static void firmware_free_data(const struct firmware *fw)
+{
+ int i;
+ vunmap(fw->data);
+ if (fw->pages) {
+ for (i = 0; i < PFN_UP(fw->size); i++)
+ __free_page(fw->pages[i]);
+ kfree(fw->pages);
+ }
+}
+
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -146,7 +222,7 @@ static ssize_t firmware_loading_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = simple_strtol(buf, NULL, 10);
int i;
@@ -157,21 +233,21 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_unlock(&fw_lock);
break;
}
- vfree(fw_priv->fw->data);
- fw_priv->fw->data = NULL;
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
- fw_priv->fw->size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vfree(fw_priv->fw->data);
+ vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
@@ -179,7 +255,10 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
- /* Pages will be freed by vfree() */
+ /* Pages are now owned by 'struct firmware' */
+ fw_priv->fw->pages = fw_priv->pages;
+ fw_priv->pages = NULL;
+
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
@@ -201,12 +280,12 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-static ssize_t
-firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t ret_count;
@@ -245,8 +324,7 @@ out:
return ret_count;
}
-static int
-fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
+static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -286,6 +364,7 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
/**
* firmware_data_write - write method for firmware
+ * @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
@@ -295,12 +374,12 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* Data written to the 'data' attribute will be later handed to
* the driver as a firmware image.
**/
-static ssize_t
-firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t retval;
@@ -341,136 +420,103 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data_tmpl = {
- .attr = {.name = "data", .mode = 0644},
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
.write = firmware_data_write,
};
-static void fw_dev_release(struct device *dev)
-{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- kfree(fw_priv->fw_id);
- kfree(fw_priv);
- kfree(dev);
-
- module_put(THIS_MODULE);
-}
-
-static void
-firmware_class_timeout(u_long data)
+static void firmware_class_timeout(u_long data)
{
struct firmware_priv *fw_priv = (struct firmware_priv *) data;
+
fw_load_abort(fw_priv);
}
-static int fw_register_device(struct device **dev_p, const char *fw_name,
- struct device *device)
+static struct firmware_priv *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, bool uevent, bool nowait)
{
- int retval;
- struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
- GFP_KERNEL);
- struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
-
- *dev_p = NULL;
+ struct firmware_priv *fw_priv;
+ struct device *f_dev;
+ int error;
- if (!fw_priv || !f_dev) {
+ fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1, GFP_KERNEL);
+ if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
- retval = -ENOMEM;
- goto error_kfree;
+ error = -ENOMEM;
+ goto err_out;
}
+ fw_priv->fw = firmware;
+ fw_priv->nowait = nowait;
+ strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
- fw_priv->attr_data = firmware_attr_data_tmpl;
- fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
- if (!fw_priv->fw_id) {
- dev_err(device, "%s: Firmware name allocation failed\n",
- __func__);
- retval = -ENOMEM;
- goto error_kfree;
- }
+ setup_timer(&fw_priv->timeout,
+ firmware_class_timeout, (u_long) fw_priv);
- fw_priv->timeout.function = firmware_class_timeout;
- fw_priv->timeout.data = (u_long) fw_priv;
- init_timer(&fw_priv->timeout);
+ f_dev = &fw_priv->dev;
+ device_initialize(f_dev);
dev_set_name(f_dev, "%s", dev_name(device));
f_dev->parent = device;
f_dev->class = &firmware_class;
- dev_set_drvdata(f_dev, fw_priv);
- dev_set_uevent_suppress(f_dev, 1);
- retval = device_register(f_dev);
- if (retval) {
- dev_err(device, "%s: device_register failed\n", __func__);
- put_device(f_dev);
- return retval;
- }
- *dev_p = f_dev;
- return 0;
-
-error_kfree:
- kfree(f_dev);
- kfree(fw_priv);
- return retval;
-}
-
-static int fw_setup_device(struct firmware *fw, struct device **dev_p,
- const char *fw_name, struct device *device,
- int uevent)
-{
- struct device *f_dev;
- struct firmware_priv *fw_priv;
- int retval;
- *dev_p = NULL;
- retval = fw_register_device(&f_dev, fw_name, device);
- if (retval)
- goto out;
+ dev_set_uevent_suppress(f_dev, true);
/* Need to pin this module until class device is destroyed */
__module_get(THIS_MODULE);
- fw_priv = dev_get_drvdata(f_dev);
+ error = device_add(f_dev);
+ if (error) {
+ dev_err(device, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
+ }
- fw_priv->fw = fw;
- retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
- if (retval) {
+ error = device_create_bin_file(f_dev, &firmware_attr_data);
+ if (error) {
dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
- goto error_unreg;
+ goto err_del_dev;
}
- retval = device_create_file(f_dev, &dev_attr_loading);
- if (retval) {
+ error = device_create_file(f_dev, &dev_attr_loading);
+ if (error) {
dev_err(device, "%s: device_create_file failed\n", __func__);
- goto error_unreg;
+ goto err_del_bin_attr;
}
if (uevent)
- dev_set_uevent_suppress(f_dev, 0);
- *dev_p = f_dev;
- goto out;
+ dev_set_uevent_suppress(f_dev, false);
+
+ return fw_priv;
+
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+err_out:
+ return ERR_PTR(error);
+}
+
+static void fw_destroy_instance(struct firmware_priv *fw_priv)
+{
+ struct device *f_dev = &fw_priv->dev;
-error_unreg:
+ device_remove_file(f_dev, &dev_attr_loading);
+ device_remove_bin_file(f_dev, &firmware_attr_data);
device_unregister(f_dev);
-out:
- return retval;
}
-static int
-_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, int uevent)
+static int _request_firmware(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ bool uevent, bool nowait)
{
- struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
- struct builtin_fw *builtin;
- int retval;
+ int retval = 0;
if (!firmware_p)
return -EINVAL;
@@ -483,54 +529,48 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (strcmp(name, builtin->name))
- continue;
- dev_info(device, "firmware: using built-in firmware %s\n",
- name);
- firmware->size = builtin->size;
- firmware->data = builtin->data;
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
}
if (uevent)
- dev_info(device, "firmware: requesting %s\n", name);
+ dev_dbg(device, "firmware: requesting %s\n", name);
- retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
- if (retval)
- goto error_kfree_fw;
-
- fw_priv = dev_get_drvdata(f_dev);
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv)) {
+ retval = PTR_ERR(fw_priv);
+ goto out;
+ }
if (uevent) {
- if (loading_timeout > 0) {
- fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
- add_timer(&fw_priv->timeout);
- }
+ if (loading_timeout > 0)
+ mod_timer(&fw_priv->timeout,
+ round_jiffies_up(jiffies +
+ loading_timeout * HZ));
- kobject_uevent(&f_dev->kobj, KOBJ_ADD);
- wait_for_completion(&fw_priv->completion);
- set_bit(FW_STATUS_DONE, &fw_priv->status);
- del_timer_sync(&fw_priv->timeout);
- } else
- wait_for_completion(&fw_priv->completion);
+ kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ }
+
+ wait_for_completion(&fw_priv->completion);
+
+ set_bit(FW_STATUS_DONE, &fw_priv->status);
+ del_timer_sync(&fw_priv->timeout);
mutex_lock(&fw_lock);
- if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) {
+ if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status))
retval = -ENOENT;
- release_firmware(fw_priv->fw);
- *firmware_p = NULL;
- }
fw_priv->fw = NULL;
mutex_unlock(&fw_lock);
- device_unregister(f_dev);
- goto out;
-error_kfree_fw:
- kfree(firmware);
- *firmware_p = NULL;
+ fw_destroy_instance(fw_priv);
+
out:
+ if (retval) {
+ release_firmware(firmware);
+ *firmware_p = NULL;
+ }
+
return retval;
}
@@ -554,26 +594,18 @@ request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int uevent = 1;
- return _request_firmware(firmware_p, name, device, uevent);
+ return _request_firmware(firmware_p, name, device, uevent, false);
}
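As a usage sketch (not part of the patch; the firmware name and foo_* helpers are invented), a synchronous caller typically pairs this with release_firmware():

	static int foo_load_firmware(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "foo/example-fw.bin", dev);
		if (err)
			return err;	/* no built-in image and no userspace load */

		/* fw->data and fw->size remain valid until release_firmware() */
		err = foo_write_to_hardware(dev, fw->data, fw->size);

		release_firmware(fw);
		return err;
	}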
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
-void
-release_firmware(const struct firmware *fw)
+void release_firmware(const struct firmware *fw)
{
- struct builtin_fw *builtin;
-
if (fw) {
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (fw->data == builtin->data)
- goto free_fw;
- }
- vfree(fw->data);
- free_fw:
+ if (!fw_is_builtin_firmware(fw))
+ firmware_free_data(fw);
kfree(fw);
}
}
@@ -589,28 +621,29 @@ struct firmware_work {
int uevent;
};
-static int
-request_firmware_work_func(void *arg)
+static int request_firmware_work_func(void *arg)
{
struct firmware_work *fw_work = arg;
const struct firmware *fw;
int ret;
+
if (!arg) {
WARN_ON(1);
return 0;
}
- ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent);
+ ret = _request_firmware(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
+
return ret;
}
/**
- * request_firmware_nowait: asynchronous version of request_firmware
+ * request_firmware_nowait - asynchronous version of request_firmware
* @module: module requesting the firmware
* @uevent: sends a uevent to trigger loading of the firmware image
* if this flag is non-zero; otherwise the firmware copy must be
* initiated manually.
@@ -633,57 +666,42 @@ request_firmware_nowait(
void (*cont)(const struct firmware *fw, void *context))
{
struct task_struct *task;
- struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work),
- gfp);
+ struct firmware_work *fw_work;
+ fw_work = kzalloc(sizeof(struct firmware_work), gfp);
if (!fw_work)
return -ENOMEM;
+
+ fw_work->module = module;
+ fw_work->name = name;
+ fw_work->device = device;
+ fw_work->context = context;
+ fw_work->cont = cont;
+ fw_work->uevent = uevent;
+
if (!try_module_get(module)) {
kfree(fw_work);
return -EFAULT;
}
- *fw_work = (struct firmware_work) {
- .module = module,
- .name = name,
- .device = device,
- .context = context,
- .cont = cont,
- .uevent = uevent,
- };
-
task = kthread_run(request_firmware_work_func, fw_work,
"firmware/%s", name);
-
if (IS_ERR(task)) {
fw_work->cont(NULL, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return PTR_ERR(task);
}
+
return 0;
}
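A sketch of the asynchronous calling convention (all foo_* names invented): the continuation runs in the kthread started above and must release the image itself.

	static void foo_fw_cont(const struct firmware *fw, void *context)
	{
		struct foo_device *foo = context;

		if (!fw)
			return;		/* load failed, timed out or was aborted */

		foo_write_to_hardware(foo->dev, fw->data, fw->size);
		release_firmware(fw);
	}

	static int foo_start_load(struct foo_device *foo)
	{
		return request_firmware_nowait(THIS_MODULE, 1, "foo/example-fw.bin",
					       foo->dev, GFP_KERNEL, foo,
					       foo_fw_cont);
	}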
-static int __init
-firmware_class_init(void)
+static int __init firmware_class_init(void)
{
- int error;
- error = class_register(&firmware_class);
- if (error) {
- printk(KERN_ERR "%s: class_register failed\n", __func__);
- return error;
- }
- error = class_create_file(&firmware_class, &class_attr_timeout);
- if (error) {
- printk(KERN_ERR "%s: class_create_file failed\n",
- __func__);
- class_unregister(&firmware_class);
- }
- return error;
-
+ return class_register(&firmware_class);
}
-static void __exit
-firmware_class_exit(void)
+
+static void __exit firmware_class_exit(void)
{
class_unregister(&firmware_class);
}
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index 8ad4ffea6920..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
-int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return iommu_ops->map(domain, iova, paddr, size, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map_range);
-
-void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- iommu_ops->unmap(domain, iova, size);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap_range);
-
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
return iommu_ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, int gfp_order, int prot)
+{
+ unsigned long invalid_mask;
+ size_t size;
+
+ size = 0x1000UL << gfp_order;
+ invalid_mask = size - 1;
+
+ BUG_ON((iova | paddr) & invalid_mask);
+
+ return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+ unsigned long invalid_mask;
+ size_t size;
+
+ size = 0x1000UL << gfp_order;
+ invalid_mask = size - 1;
+
+ BUG_ON(iova & invalid_mask);
+
+ return iommu_ops->unmap(domain, iova, gfp_order);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
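To illustrate the order-based interface (a sketch assuming 4 KiB pages; the function and addresses are invented): a 2 MiB window is 512 pages, i.e. order 9, and both iova and paddr must be 2 MiB aligned or the BUG_ON() above fires.

	static int example_map_2mib(struct iommu_domain *domain,
				    unsigned long iova, phys_addr_t paddr)
	{
		const int order = 9;	/* 0x1000UL << 9 == 2 MiB */
		int ret;

		ret = iommu_map(domain, iova, paddr, order,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			return ret;

		/* ... DMA through the mapping ... */

		iommu_unmap(domain, iova, order);
		return 0;
	}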
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bd025059711f..cafeaaf0428f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,10 +22,13 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
+#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
+static DEFINE_MUTEX(mem_sysfs_mutex);
+
#define MEMORY_CLASS_NAME "memory"
static struct sysdev_class memory_sysdev_class = {
@@ -44,7 +47,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
return retval;
}
-static struct kset_uevent_ops memory_uevent_ops = {
+static const struct kset_uevent_ops memory_uevent_ops = {
.name = memory_uevent_name,
.uevent = memory_uevent,
};
@@ -309,17 +312,18 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
* Block size attribute stuff
*/
static ssize_t
-print_block_size(struct class *class, char *buf)
+print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+ return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
}
-static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static int block_size_init(void)
{
return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_block_size_bytes.attr);
+ &attr_block_size_bytes.attr);
}
/*
@@ -330,7 +334,8 @@ static int block_size_init(void)
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
-memory_probe_store(struct class *class, const char *buf, size_t count)
+memory_probe_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
{
u64 phys_addr;
int nid;
@@ -367,7 +372,9 @@ static inline int memory_probe_init(void)
/* Soft offline a page */
static ssize_t
-store_soft_offline_page(struct class *class, const char *buf, size_t count)
+store_soft_offline_page(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
u64 pfn;
@@ -384,7 +391,9 @@ store_soft_offline_page(struct class *class, const char *buf, size_t count)
/* Forcibly offline a page, including killing processes. */
static ssize_t
-store_hard_offline_page(struct class *class, const char *buf, size_t count)
+store_hard_offline_page(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
u64 pfn;
@@ -423,21 +432,68 @@ static inline int memory_fail_init(void)
* differentiation between which *physical* devices each
* section belongs to...
*/
+int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+{
+ return 0;
+}
+
+struct memory_block *find_memory_block_hinted(struct mem_section *section,
+ struct memory_block *hint)
+{
+ struct kobject *kobj;
+ struct sys_device *sysdev;
+ struct memory_block *mem;
+ char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
+
+ kobj = hint ? &hint->sysdev.kobj : NULL;
+
+ /*
+ * This only works because we know that section == sysdev->id;
+ * it is slightly redundant with sysdev_register().
+ */
+ sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section));
+
+ kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
+ if (!kobj)
+ return NULL;
+
+ sysdev = container_of(kobj, struct sys_device, kobj);
+ mem = container_of(sysdev, struct memory_block, sysdev);
+
+ return mem;
+}
+
+/*
+ * For now, we have a linear search to go find the appropriate
+ * memory_block corresponding to a particular phys_index. If
+ * this gets to be a real problem, we can always use a radix
+ * tree or something here.
+ *
+ * This could be made generic for all sysdev classes.
+ */
+struct memory_block *find_memory_block(struct mem_section *section)
+{
+ return find_memory_block_hinted(section, NULL);
+}
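The hint pays off when consecutive sections are walked in order, as link_mem_sections() in node.c does further down; a minimal sketch of that pattern (loop bounds invented). Each call consumes the hint's kobject reference, so only the last block returned needs to be put.

	static void example_walk_sections(unsigned long start, unsigned long end)
	{
		struct memory_block *mem = NULL;
		unsigned long nr;

		for (nr = start; nr < end; nr++) {
			if (!present_section_nr(nr))
				continue;
			mem = find_memory_block_hinted(__nr_to_section(nr), mem);
			/* ... inspect mem ... */
		}

		if (mem)
			kobject_put(&mem->sysdev.kobj);
	}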
static int add_memory_block(int nid, struct mem_section *section,
- unsigned long state, int phys_device,
- enum mem_add_context context)
+ unsigned long state, enum mem_add_context context)
{
struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ unsigned long start_pfn;
int ret = 0;
if (!mem)
return -ENOMEM;
+ mutex_lock(&mem_sysfs_mutex);
+
mem->phys_index = __section_nr(section);
mem->state = state;
+ mem->section_count++;
mutex_init(&mem->state_mutex);
- mem->phys_device = phys_device;
+ start_pfn = section_nr_to_pfn(mem->phys_index);
+ mem->phys_device = arch_get_memory_phys_device(start_pfn);
ret = register_memory(mem, section);
if (!ret)
@@ -453,53 +509,29 @@ static int add_memory_block(int nid, struct mem_section *section,
ret = register_mem_sect_under_node(mem, nid);
}
+ mutex_unlock(&mem_sysfs_mutex);
return ret;
}
-/*
- * For now, we have a linear search to go find the appropriate
- * memory_block corresponding to a particular phys_index. If
- * this gets to be a real problem, we can always use a radix
- * tree or something here.
- *
- * This could be made generic for all sysdev classes.
- */
-struct memory_block *find_memory_block(struct mem_section *section)
-{
- struct kobject *kobj;
- struct sys_device *sysdev;
- struct memory_block *mem;
- char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
-
- /*
- * This only works because we know that section == sysdev->id
- * slightly redundant with sysdev_register()
- */
- sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section));
-
- kobj = kset_find_obj(&memory_sysdev_class.kset, name);
- if (!kobj)
- return NULL;
-
- sysdev = container_of(kobj, struct sys_device, kobj);
- mem = container_of(sysdev, struct memory_block, sysdev);
-
- return mem;
-}
-
int remove_memory_block(unsigned long node_id, struct mem_section *section,
int phys_device)
{
struct memory_block *mem;
+ mutex_lock(&mem_sysfs_mutex);
mem = find_memory_block(section);
- unregister_mem_sect_under_nodes(mem);
- mem_remove_simple_file(mem, phys_index);
- mem_remove_simple_file(mem, state);
- mem_remove_simple_file(mem, phys_device);
- mem_remove_simple_file(mem, removable);
- unregister_memory(mem, section);
+ mem->section_count--;
+ if (mem->section_count == 0) {
+ unregister_mem_sect_under_nodes(mem);
+ mem_remove_simple_file(mem, phys_index);
+ mem_remove_simple_file(mem, state);
+ mem_remove_simple_file(mem, phys_device);
+ mem_remove_simple_file(mem, removable);
+ unregister_memory(mem, section);
+ }
+
+ mutex_unlock(&mem_sysfs_mutex);
return 0;
}
@@ -509,7 +541,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG);
+ return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG);
}
int unregister_memory_section(struct mem_section *section)
@@ -542,7 +574,7 @@ int __init memory_dev_init(void)
if (!present_section_nr(i))
continue;
err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE,
- 0, BOOT);
+ BOOT);
if (!ret)
ret = err;
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 103be9cacb05..db930d3ee312 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include "base.h"
@@ -14,12 +15,10 @@ static char *make_driver_name(struct device_driver *drv)
{
char *driver_name;
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
+ driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
if (!driver_name)
return NULL;
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
return driver_name;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 70122791683d..ce012a9c6201 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,15 +9,20 @@
#include <linux/memory.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
+#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/swap.h>
+#include <linux/slab.h>
+
+static struct sysdev_class_attribute *node_state_attrs[];
static struct sysdev_class node_class = {
.name = "node",
+ .attrs = node_state_attrs,
};
@@ -61,8 +66,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
struct sysinfo i;
si_meminfo_node(&i, nid);
-
- n = sprintf(buf, "\n"
+ n = sprintf(buf,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
@@ -73,13 +77,33 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
"Node %d Unevictable: %8lu kB\n"
- "Node %d Mlocked: %8lu kB\n"
+ "Node %d Mlocked: %8lu kB\n",
+ nid, K(i.totalram),
+ nid, K(i.freeram),
+ nid, K(i.totalram - i.freeram),
+ nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
+ node_page_state(nid, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
+ node_page_state(nid, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
+ nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
+ nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_UNEVICTABLE)),
+ nid, K(node_page_state(nid, NR_MLOCK)));
+
#ifdef CONFIG_HIGHMEM
+ n += sprintf(buf + n,
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
"Node %d LowTotal: %8lu kB\n"
- "Node %d LowFree: %8lu kB\n"
+ "Node %d LowFree: %8lu kB\n",
+ nid, K(i.totalhigh),
+ nid, K(i.freehigh),
+ nid, K(i.totalram - i.totalhigh),
+ nid, K(i.freeram - i.freehigh));
#endif
+ n += sprintf(buf + n,
"Node %d Dirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
@@ -94,25 +118,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Slab: %8lu kB\n"
"Node %d SReclaimable: %8lu kB\n"
"Node %d SUnreclaim: %8lu kB\n",
- nid, K(i.totalram),
- nid, K(i.freeram),
- nid, K(i.totalram - i.freeram),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
- node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
- node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_UNEVICTABLE)),
- nid, K(node_page_state(nid, NR_MLOCK)),
-#ifdef CONFIG_HIGHMEM
- nid, K(i.totalhigh),
- nid, K(i.freehigh),
- nid, K(i.totalram - i.totalhigh),
- nid, K(i.freeram - i.freehigh),
-#endif
nid, K(node_page_state(nid, NR_FILE_DIRTY)),
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
@@ -155,6 +160,18 @@ static ssize_t node_read_numastat(struct sys_device * dev,
}
static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static ssize_t node_read_vmstat(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *buf)
+{
+ int nid = dev->id;
+ return sprintf(buf,
+ "nr_written %lu\n"
+ "nr_dirtied %lu\n",
+ node_page_state(nid, NR_WRITTEN),
+ node_page_state(nid, NR_DIRTIED));
+}
+static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
static ssize_t node_read_distance(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
@@ -162,8 +179,11 @@ static ssize_t node_read_distance(struct sys_device * dev,
int len = 0;
int i;
- /* buf currently PAGE_SIZE, need ~4 chars per node */
- BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2);
+ /*
+ * buf is currently PAGE_SIZE in length and each node needs 4 chars
+ * at the most (distance + space or newline).
+ */
+ BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
for_each_online_node(i)
len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
@@ -235,10 +255,13 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
+ sysdev_create_file(&node->sysdev, &attr_vmstat);
scan_unevictable_register_node(node);
hugetlb_register_node(node);
+
+ compaction_register_node(node);
}
return error;
}
@@ -257,6 +280,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
+ sysdev_remove_file(&node->sysdev, &attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
@@ -399,25 +423,27 @@ static int link_mem_sections(int nid)
unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
unsigned long pfn;
+ struct memory_block *mem_blk = NULL;
int err = 0;
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
unsigned long section_nr = pfn_to_section_nr(pfn);
struct mem_section *mem_sect;
- struct memory_block *mem_blk;
int ret;
if (!present_section_nr(section_nr))
continue;
mem_sect = __nr_to_section(section_nr);
- mem_blk = find_memory_block(mem_sect);
+ mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
ret = register_mem_sect_under_node(mem_blk, nid);
if (!err)
err = ret;
/* discard ref obtained in find_memory_block() */
- kobject_put(&mem_blk->sysdev.kobj);
}
+
+ if (mem_blk)
+ kobject_put(&mem_blk->sysdev.kobj);
return err;
}
@@ -544,76 +570,52 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
return n;
}
-static ssize_t print_nodes_possible(struct sysdev_class *class, char *buf)
-{
- return print_nodes_state(N_POSSIBLE, buf);
-}
-
-static ssize_t print_nodes_online(struct sysdev_class *class, char *buf)
-{
- return print_nodes_state(N_ONLINE, buf);
-}
-
-static ssize_t print_nodes_has_normal_memory(struct sysdev_class *class,
- char *buf)
-{
- return print_nodes_state(N_NORMAL_MEMORY, buf);
-}
+struct node_attr {
+ struct sysdev_class_attribute attr;
+ enum node_states state;
+};
-static ssize_t print_nodes_has_cpu(struct sysdev_class *class, char *buf)
+static ssize_t show_node_state(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
{
- return print_nodes_state(N_CPU, buf);
+ struct node_attr *na = container_of(attr, struct node_attr, attr);
+ return print_nodes_state(na->state, buf);
}
-static SYSDEV_CLASS_ATTR(possible, 0444, print_nodes_possible, NULL);
-static SYSDEV_CLASS_ATTR(online, 0444, print_nodes_online, NULL);
-static SYSDEV_CLASS_ATTR(has_normal_memory, 0444, print_nodes_has_normal_memory,
- NULL);
-static SYSDEV_CLASS_ATTR(has_cpu, 0444, print_nodes_has_cpu, NULL);
+#define _NODE_ATTR(name, state) \
+ { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+static struct node_attr node_state_attr[] = {
+ _NODE_ATTR(possible, N_POSSIBLE),
+ _NODE_ATTR(online, N_ONLINE),
+ _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
+ _NODE_ATTR(has_cpu, N_CPU),
#ifdef CONFIG_HIGHMEM
-static ssize_t print_nodes_has_high_memory(struct sysdev_class *class,
- char *buf)
-{
- return print_nodes_state(N_HIGH_MEMORY, buf);
-}
-
-static SYSDEV_CLASS_ATTR(has_high_memory, 0444, print_nodes_has_high_memory,
- NULL);
+ _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
+};
-struct sysdev_class_attribute *node_state_attr[] = {
- &attr_possible,
- &attr_online,
- &attr_has_normal_memory,
+static struct sysdev_class_attribute *node_state_attrs[] = {
+ &node_state_attr[0].attr,
+ &node_state_attr[1].attr,
+ &node_state_attr[2].attr,
+ &node_state_attr[3].attr,
#ifdef CONFIG_HIGHMEM
- &attr_has_high_memory,
+ &node_state_attr[4].attr,
#endif
- &attr_has_cpu,
+ NULL
};
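The idiom above, one shared show routine recovering per-attribute data via container_of(), generalizes to any attribute family; a reduced illustration with invented names:

	struct wrapped_attr {
		struct sysdev_class_attribute attr;
		int datum;
	};

	static ssize_t show_datum(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr, char *buf)
	{
		struct wrapped_attr *wa =
			container_of(attr, struct wrapped_attr, attr);

		return sprintf(buf, "%d\n", wa->datum);
	}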
-static int node_states_init(void)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < NR_NODE_STATES; i++) {
- int ret;
- ret = sysdev_class_create_file(&node_class, node_state_attr[i]);
- if (!err)
- err = ret;
- }
- return err;
-}
-
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
int ret;
+ BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
+ BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
+
ret = sysdev_class_register(&node_class);
if (!ret) {
- ret = node_states_init();
hotplug_memory_notifier(node_memory_callback,
NODE_CALLBACK_PRI);
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 58efaf2f1259..f051cfff18af 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -128,7 +129,7 @@ struct platform_object {
};
/**
- * platform_device_put
+ * platform_device_put - destroy a platform device
* @pdev: platform device to free
*
* Free all memory associated with a platform device. This function must
@@ -146,13 +147,14 @@ static void platform_device_release(struct device *dev)
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
+ of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.resource);
kfree(pa);
}
/**
- * platform_device_alloc
+ * platform_device_alloc - create a platform device
* @name: base name of the device we're adding
* @id: instance id
*
@@ -177,7 +179,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
EXPORT_SYMBOL_GPL(platform_device_alloc);
/**
- * platform_device_add_resources
+ * platform_device_add_resources - add resources to a platform device
* @pdev: platform device allocated by platform_device_alloc to add resources to
* @res: set of resources that needs to be allocated for the device
* @num: number of resources
@@ -187,22 +189,25 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
* released.
*/
int platform_device_add_resources(struct platform_device *pdev,
- struct resource *res, unsigned int num)
+ const struct resource *res, unsigned int num)
{
struct resource *r;
- r = kmalloc(sizeof(struct resource) * num, GFP_KERNEL);
+ if (!res)
+ return 0;
+
+ r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
if (r) {
- memcpy(r, res, sizeof(struct resource) * num);
pdev->resource = r;
pdev->num_resources = num;
+ return 0;
}
- return r ? 0 : -ENOMEM;
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);
/**
- * platform_device_add_data
+ * platform_device_add_data - add platform-specific data to a platform device
* @pdev: platform device allocated by platform_device_alloc to add resources to
* @data: platform specific data for this platform device
* @size: size of platform specific data
@@ -214,8 +219,12 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources);
int platform_device_add_data(struct platform_device *pdev, const void *data,
size_t size)
{
- void *d = kmemdup(data, size, GFP_KERNEL);
+ void *d;
+
+ if (!data)
+ return 0;
+ d = kmemdup(data, size, GFP_KERNEL);
if (d) {
pdev->dev.platform_data = d;
return 0;
@@ -344,104 +353,52 @@ void platform_device_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
- * platform_device_register_simple
- * @name: base name of the device we're adding
- * @id: instance id
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
- *
- * This function creates a simple platform device that requires minimal
- * resource and memory management. Canned release function freeing memory
- * allocated for the device allows drivers using such devices to be
- * unloaded without waiting for the last reference to the device to be
- * dropped.
+ * platform_device_register_resndata - add a platform-level device with
+ * resources and platform-specific data
*
- * This interface is primarily intended for use with legacy drivers which
- * probe hardware directly. Because such drivers create sysfs device nodes
- * themselves, rather than letting system infrastructure handle such device
- * enumeration tasks, they don't fully conform to the Linux driver model.
- * In particular, when such drivers are built as modules, they can't be
- * "hotplugged".
- */
-struct platform_device *platform_device_register_simple(const char *name,
- int id,
- struct resource *res,
- unsigned int num)
-{
- struct platform_device *pdev;
- int retval;
-
- pdev = platform_device_alloc(name, id);
- if (!pdev) {
- retval = -ENOMEM;
- goto error;
- }
-
- if (num) {
- retval = platform_device_add_resources(pdev, res, num);
- if (retval)
- goto error;
- }
-
- retval = platform_device_add(pdev);
- if (retval)
- goto error;
-
- return pdev;
-
-error:
- platform_device_put(pdev);
- return ERR_PTR(retval);
-}
-EXPORT_SYMBOL_GPL(platform_device_register_simple);
-
-/**
- * platform_device_register_data
* @parent: parent device for the device we're adding
* @name: base name of the device we're adding
* @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
* @data: platform specific data for this platform device
* @size: size of platform specific data
*
- * This function creates a simple platform device that requires minimal
- * resource and memory management. Canned release function freeing memory
- * allocated for the device allows drivers using such devices to be
- * unloaded without waiting for the last reference to the device to be
- * dropped.
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *platform_device_register_data(
+struct platform_device *__init_or_module platform_device_register_resndata(
struct device *parent,
const char *name, int id,
+ const struct resource *res, unsigned int num,
const void *data, size_t size)
{
+ int ret = -ENOMEM;
struct platform_device *pdev;
- int retval;
pdev = platform_device_alloc(name, id);
- if (!pdev) {
- retval = -ENOMEM;
- goto error;
- }
+ if (!pdev)
+ goto err;
pdev->dev.parent = parent;
- if (size) {
- retval = platform_device_add_data(pdev, data, size);
- if (retval)
- goto error;
- }
+ ret = platform_device_add_resources(pdev, res, num);
+ if (ret)
+ goto err;
- retval = platform_device_add(pdev);
- if (retval)
- goto error;
+ ret = platform_device_add_data(pdev, data, size);
+ if (ret)
+ goto err;
- return pdev;
+ ret = platform_device_add(pdev);
+ if (ret) {
+err:
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
-error:
- platform_device_put(pdev);
- return ERR_PTR(retval);
+ return pdev;
}
-EXPORT_SYMBOL_GPL(platform_device_register_data);
+EXPORT_SYMBOL_GPL(platform_device_register_resndata);
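A usage sketch of the combined helper (device name, addresses and payload are invented): one MMIO region, one IRQ and a small data blob registered in a single call.

	static const struct resource foo_resources[] = {
		{
			.start	= 0x10000000,
			.end	= 0x10000fff,
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= 42,
			.end	= 42,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static const int foo_config = 1;	/* stands in for real platform data */

	static int __init foo_register(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_resndata(NULL, "foo", -1,
				foo_resources, ARRAY_SIZE(foo_resources),
				&foo_config, sizeof(foo_config));

		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}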
static int platform_drv_probe(struct device *_dev)
{
@@ -473,7 +430,7 @@ static void platform_drv_shutdown(struct device *_dev)
}
/**
- * platform_driver_register
+ * platform_driver_register - register a driver for platform-level devices
* @drv: platform driver structure
*/
int platform_driver_register(struct platform_driver *drv)
@@ -491,7 +448,7 @@ int platform_driver_register(struct platform_driver *drv)
EXPORT_SYMBOL_GPL(platform_driver_register);
/**
- * platform_driver_unregister
+ * platform_driver_unregister - unregister a driver for platform-level devices
* @drv: platform driver structure
*/
void platform_driver_unregister(struct platform_driver *drv)
@@ -535,12 +492,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
* if the probe was successful, and make sure any forced probes of
* new devices fail.
*/
- spin_lock(&platform_bus_type.p->klist_drivers.k_lock);
+ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
drv->probe = NULL;
if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
retval = -ENODEV;
drv->driver.probe = platform_drv_probe_fail;
- spin_unlock(&platform_bus_type.p->klist_drivers.k_lock);
+ spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
if (code != retval)
platform_driver_unregister(drv);
@@ -548,6 +505,62 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
}
EXPORT_SYMBOL_GPL(platform_driver_probe);
+/**
+ * platform_create_bundle - register driver and create corresponding device
+ * @driver: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ * @res: set of resources that needs to be allocated for the device
+ * @n_res: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Use this in legacy-style modules that probe hardware directly and
+ * register a single platform device and corresponding platform driver.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+struct platform_device * __init_or_module platform_create_bundle(
+ struct platform_driver *driver,
+ int (*probe)(struct platform_device *),
+ struct resource *res, unsigned int n_res,
+ const void *data, size_t size)
+{
+ struct platform_device *pdev;
+ int error;
+
+ pdev = platform_device_alloc(driver->driver.name, -1);
+ if (!pdev) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ error = platform_device_add_resources(pdev, res, n_res);
+ if (error)
+ goto err_pdev_put;
+
+ error = platform_device_add_data(pdev, data, size);
+ if (error)
+ goto err_pdev_put;
+
+ error = platform_device_add(pdev);
+ if (error)
+ goto err_pdev_put;
+
+ error = platform_driver_probe(driver, probe);
+ if (error)
+ goto err_pdev_del;
+
+ return pdev;
+
+err_pdev_del:
+ platform_device_del(pdev);
+err_pdev_put:
+ platform_device_put(pdev);
+err_out:
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(platform_create_bundle);
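With this helper, a legacy-style module shrinks to roughly the following sketch (foo_probe is an assumed __init probe routine; no resources or platform data are passed here):

	static struct platform_device *foo_pdev;

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		foo_pdev = platform_create_bundle(&foo_driver, foo_probe,
						  NULL, 0, NULL, 0);
		return IS_ERR(foo_pdev) ? PTR_ERR(foo_pdev) : 0;
	}

	static void __exit foo_exit(void)
	{
		platform_device_unregister(foo_pdev);
		platform_driver_unregister(&foo_driver);
	}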
+
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
* fully running: "modprobe $MODALIAS"
@@ -571,6 +584,12 @@ static struct device_attribute platform_dev_attrs[] = {
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct platform_device *pdev = to_platform_device(dev);
+ int rc;
+
+ /* Some devices have extra OF data and an OF-style MODALIAS */
+ rc = of_device_uevent(dev, env);
+ if (rc != -ENODEV)
+ return rc;
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
(pdev->id_entry) ? pdev->id_entry->name : pdev->name);
@@ -578,7 +597,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
}
static const struct platform_device_id *platform_match_id(
- struct platform_device_id *id,
+ const struct platform_device_id *id,
struct platform_device *pdev)
{
while (id->name[0]) {
@@ -609,7 +628,11 @@ static int platform_match(struct device *dev, struct device_driver *drv)
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
- /* match against the id table first */
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
@@ -671,7 +694,7 @@ static void platform_pm_complete(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int platform_pm_suspend(struct device *dev)
+int __weak platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -689,7 +712,7 @@ static int platform_pm_suspend(struct device *dev)
return ret;
}
-static int platform_pm_suspend_noirq(struct device *dev)
+int __weak platform_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -705,7 +728,7 @@ static int platform_pm_suspend_noirq(struct device *dev)
return ret;
}
-static int platform_pm_resume(struct device *dev)
+int __weak platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -723,7 +746,7 @@ static int platform_pm_resume(struct device *dev)
return ret;
}
-static int platform_pm_resume_noirq(struct device *dev)
+int __weak platform_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -903,17 +926,17 @@ static int platform_pm_restore_noirq(struct device *dev)
int __weak platform_pm_runtime_suspend(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_suspend(dev);
};
int __weak platform_pm_runtime_resume(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_resume(dev);
};
int __weak platform_pm_runtime_idle(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_idle(dev);
};
#else /* !CONFIG_PM_RUNTIME */
@@ -953,6 +976,41 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
+/**
+ * platform_bus_get_pm_ops() - return pointer to the bus's dev_pm_ops
+ *
+ * This function can be used by platform code to get the current
+ * set of dev_pm_ops functions used by the platform_bus_type.
+ */
+const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
+{
+ return platform_bus_type.pm;
+}
+
+/**
+ * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
+ *
+ * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
+ *
+ * Platform code can override the dev_pm_ops methods of
+ * platform_bus_type by using this function. It is expected that
+ * platform code will first do a platform_bus_get_pm_ops(), then
+ * kmemdup it, then customize selected methods and pass a pointer to
+ * the new struct dev_pm_ops to this function.
+ *
+ * Since platform-specific code is customizing methods for *all*
+ * devices (not just platform-specific devices) it is expected that
+ * any custom overrides of these functions will keep existing behavior
+ * and simply extend it. For example, any customization of the
+ * runtime PM methods should continue to call the pm_generic_*
+ * functions as the default ones do in addition to the
+ * platform-specific behavior.
+ */
+void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
+{
+ platform_bus_type.pm = pm;
+}
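Spelled out, the recipe from the comment above might look like this in SoC init code (foo_soc_idle_device() is an invented hook; note that the override keeps calling the generic implementation first):

	static int foo_runtime_suspend(struct device *dev)
	{
		int ret = pm_generic_runtime_suspend(dev);

		if (!ret)
			foo_soc_idle_device(dev);
		return ret;
	}

	static void __init foo_pm_init(void)
	{
		struct dev_pm_ops *pm;

		pm = kmemdup(platform_bus_get_pm_ops(), sizeof(*pm), GFP_KERNEL);
		if (!pm)
			return;

		pm->runtime_suspend = foo_runtime_suspend;
		platform_bus_set_pm_ops(pm);
	}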
+
int __init platform_bus_init(void)
{
int error;
@@ -994,9 +1052,11 @@ static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);
/**
- * early_platform_driver_register
+ * early_platform_driver_register - register early platform driver
* @epdrv: early_platform driver structure
* @buf: string passed from early_param()
+ *
+ * Helper function for early_platform_init() / early_platform_init_buffer()
*/
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
char *buf)
@@ -1048,9 +1108,12 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv,
}
/**
- * early_platform_add_devices - add a numbers of early platform devices
+ * early_platform_add_devices - adds a number of early platform devices
* @devs: array of early platform devices to add
* @num: number of early platform devices in array
+ *
+ * Used by early architecture code to register early platform devices and
+ * their platform data.
*/
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
@@ -1070,8 +1133,12 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
}
/**
- * early_platform_driver_register_all
+ * early_platform_driver_register_all - register early platform drivers
* @class_str: string to identify early platform driver class
+ *
+ * Used by architecture code to register all early platform drivers
+ * for a certain class. If this call is omitted, only the early platform
+ * drivers with matching kernel command line class parameters will be
+ * registered.
*/
void __init early_platform_driver_register_all(char *class_str)
{
@@ -1093,7 +1160,7 @@ void __init early_platform_driver_register_all(char *class_str)
}
/**
- * early_platform_match
+ * early_platform_match - find early platform device matching driver
* @epdrv: early platform driver structure
* @id: id to match against
*/
@@ -1111,7 +1178,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id)
}
/**
- * early_platform_left
+ * early_platform_left - check if early platform driver has matching devices
* @epdrv: early platform driver structure
* @id: return true if id or above exists
*/
@@ -1129,7 +1196,7 @@ static __init int early_platform_left(struct early_platform_driver *epdrv,
}
/**
- * early_platform_driver_probe_id
+ * early_platform_driver_probe_id - probe drivers matching class_str and id
* @class_str: string to identify early platform driver class
* @id: id to match against
* @nr_probe: number of platform devices to successfully probe before exiting
@@ -1181,6 +1248,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name && slab_is_available()) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
if (epdrv->pdrv->probe(match))
pr_warning("%s: unable to probe %s early.\n",
class_str, match->name);
@@ -1199,10 +1286,14 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
/**
- * early_platform_driver_probe
+ * early_platform_driver_probe - probe a class of registered drivers
* @class_str: string to identify early platform driver class
* @nr_probe: number of platform devices to successfully probe before exiting
* @user_only: only probe user specified early platform devices
+ *
+ * Used by architecture code to probe registered early platform drivers
+ * within a certain class. For probe to happen a registered early platform
+ * device matching a registered early platform driver is needed.
*/
int __init early_platform_driver_probe(char *class_str,
int nr_probe,
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3ce3519e8f30..abe46edfe5b4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,9 @@
obj-$(CONFIG_PM) += sysfs.o
-obj-$(CONFIG_PM_SLEEP) += main.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
+obj-$(CONFIG_PM_OPS) += generic_ops.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+obj-$(CONFIG_PM_OPP) += opp.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 000000000000..81f2c84697f4
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,233 @@
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
+ * @dev: Device to handle.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_idle(), execute it and return its error code, if nonzero.
+ * Otherwise, execute pm_runtime_suspend() for the device and return 0.
+ */
+int pm_generic_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->runtime_idle) {
+ int ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
+
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * If the device has not been suspended at run time, execute the
+ * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
+ * return its error code. Otherwise, return zero.
+ */
+static int __pm_generic_call(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+
+ if (!pm || pm_runtime_suspended(dev))
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ callback = pm->suspend;
+ break;
+ case PM_EVENT_FREEZE:
+ callback = pm->freeze;
+ break;
+ case PM_EVENT_HIBERNATE:
+ callback = pm->poweroff;
+ break;
+ case PM_EVENT_THAW:
+ callback = pm->thaw;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ return callback ? callback(dev) : 0;
+}
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * __pm_generic_resume - Generic resume/restore callback for subsystems.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * Execute the resume/restore callback provided by the @dev's driver, if
+ * defined. If it returns 0, change the device's runtime PM status to 'active'.
+ * Return the callback's error code.
+ */
+static int __pm_generic_resume(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+ int ret;
+
+ if (!pm)
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_RESUME:
+ callback = pm->resume;
+ break;
+ case PM_EVENT_RESTORE:
+ callback = pm->restore;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ if (!callback)
+ return 0;
+
+ ret = callback(dev);
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+#endif /* CONFIG_PM_SLEEP */
+
+struct dev_pm_ops generic_subsys_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+#endif
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+};
+EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
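A subsystem that is content with the generic behaviour can adopt the whole table in one assignment; for instance, a hypothetical bus:

	struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &generic_subsys_pm_ops,
	};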
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a5142bddef41..31b526661ec4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,6 +25,7 @@
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/async.h>
#include "../base.h"
#include "power.h"
@@ -34,14 +35,15 @@
* because children are guaranteed to be discovered after parents, and
* are inserted at the back of the list on discovery.
*
- * Since device_pm_add() may be called with a device semaphore held,
- * we must never try to acquire a device semaphore while holding
+ * Since device_pm_add() may be called with a device lock held,
+ * we must never try to acquire a device lock while holding
* dpm_list_mutex.
*/
LIST_HEAD(dpm_list);
static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
/*
* Set once the preparation of devices for a PM transition has started, reset
@@ -49,6 +51,8 @@ static DEFINE_MUTEX(dpm_list_mtx);
*/
static bool transition_started;
+static int async_error;
+
/**
* device_pm_init - Initialize the PM-related part of a device object.
* @dev: Device object being initialized.
@@ -56,6 +60,10 @@ static bool transition_started;
void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
+ init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
+ dev->power.wakeup = NULL;
+ spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
}
@@ -111,9 +119,11 @@ void device_pm_remove(struct device *dev)
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
+ complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
+ device_wakeup_disable(dev);
pm_runtime_remove(dev);
}
@@ -188,6 +198,31 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
}
/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
* @ops: PM operations to choose from.
@@ -271,8 +306,9 @@ static int pm_noirq_op(struct device *dev,
ktime_t calltime, delta, rettime;
if (initcall_debug) {
- pr_info("calling %s_i+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -375,7 +411,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
ktime_t calltime;
- s64 usecs64;
+ u64 usecs64;
int usecs;
calltime = ktime_get();
@@ -409,8 +445,23 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "EARLY type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
}
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "EARLY class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ }
+
+End:
TRACE_RESUME(error);
return error;
}
@@ -468,15 +519,19 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- down(&dev->sem);
+ dpm_wait(dev->parent, async);
+ device_lock(dev);
+
+ dev->power.status = DPM_RESUMING;
if (dev->bus) {
if (dev->bus->pm) {
@@ -509,12 +564,30 @@ static int device_resume(struct device *dev, pm_message_t state)
}
}
End:
- up(&dev->sem);
+ device_unlock(dev);
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+ put_device(dev);
+}
+
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -525,21 +598,34 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ struct device *dev;
ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.next);
+ pm_transition = state;
+ async_error = 0;
+ list_for_each_entry(dev, &dpm_list, power.entry) {
+ if (dev->power.status < DPM_OFF)
+ continue;
+
+ INIT_COMPLETION(dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_list)) {
+ dev = to_device(dpm_list.next);
get_device(dev);
- if (dev->power.status >= DPM_OFF) {
+ if (dev->power.status >= DPM_OFF && !is_async(dev)) {
int error;
- dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state);
+ error = device_resume(dev, state, false);
mutex_lock(&dpm_list_mtx);
if (error)
@@ -554,6 +640,7 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, NULL);
}
@@ -564,7 +651,7 @@ static void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
- down(&dev->sem);
+ device_lock(dev);
if (dev->class && dev->class->pm && dev->class->pm->complete) {
pm_dev_dbg(dev, state, "completing class ");
@@ -581,7 +668,7 @@ static void device_complete(struct device *dev, pm_message_t state)
dev->bus->pm->complete(dev);
}
- up(&dev->sem);
+ device_unlock(dev);
}
/**
@@ -670,10 +757,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "LATE class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "LATE type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
+ }
+
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
+
+End:
return error;
}
@@ -735,12 +838,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state)
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
- down(&dev->sem);
+ dpm_wait_for_children(dev, async);
+ device_lock(dev);
+
+ if (async_error)
+ goto End;
if (dev->class) {
if (dev->class->pm) {
@@ -772,12 +880,45 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
+
+ if (!error)
+ dev->power.status = DPM_OFF;
+
End:
- up(&dev->sem);
+ device_unlock(dev);
+ complete_all(&dev->power.completion);
+
+ if (error)
+ async_error = error;
return error;
}
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ INIT_COMPLETION(dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend, dev);
+ return 0;
+ }
+
+ return __device_suspend(dev, pm_transition, false);
+}
+
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -790,13 +931,15 @@ static int dpm_suspend(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state);
+ error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -804,13 +947,17 @@ static int dpm_suspend(pm_message_t state)
put_device(dev);
break;
}
- dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
put_device(dev);
+ if (async_error)
+ break;
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
if (!error)
dpm_show_time(starttime, state, NULL);
return error;
@@ -828,7 +975,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
{
int error = 0;
- down(&dev->sem);
+ device_lock(dev);
if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
pm_dev_dbg(dev, state, "preparing ");
@@ -852,7 +999,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
suspend_report_result(dev->class->pm->prepare, error);
}
End:
- up(&dev->sem);
+ device_unlock(dev);
return error;
}
@@ -936,3 +1083,15 @@ void __suspend_report_result(const char *function, void *fn, int ret)
printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @dev: Device to wait for.
+ * @subordinate: Device that needs to wait for @dev.
+ */
+int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+ dpm_wait(dev, subordinate->power.async_suspend);
+ return async_error;
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
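
A minimal usage sketch, assuming a hypothetical driver whose device must not suspend until a companion device (not its parent, so not covered by dpm_wait_for_children()) has finished; struct foo_data and foo_power_down() are assumptions:

	static int foo_suspend(struct device *dev)
	{
		struct foo_data *data = dev_get_drvdata(dev);
		int error;

		/* Block until the companion completes; returns async_error
		 * if an asynchronous suspend failed in the meantime. */
		error = device_pm_wait_for_dev(dev, data->companion);
		if (error)
			return error;

		return foo_power_down(data);
	}
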
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
new file mode 100644
index 000000000000..2bb9b4cf59d7
--- /dev/null
+++ b/drivers/base/power/opp.c
@@ -0,0 +1,628 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/opp.h>
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ * |- device 1 (represents voltage domain 1)
+ * | |- opp 1 (availability, freq, voltage)
+ * | |- opp 2 ..
+ * ... ...
+ * | `- opp n ..
+ * |- device 2 (represents the next voltage domain)
+ * ...
+ * `- device m (represents mth voltage domain)
+ * device 1, 2, ... are each represented by a struct device_opp, while each
+ * opp is represented by a struct opp.
+ */
+
+/**
+ * struct opp - Generic OPP description structure
+ * @node: opp list node. The nodes are maintained throughout the lifetime
+ * of boot. It is expected that only an optimal set of OPPs is
+ * added to the library by the SoC framework.
+ * RCU usage: the opp list is traversed with RCU locks. Node
+ * modification is possible at runtime, hence modifications are
+ * protected by the dev_opp_list_lock for integrity.
+ * IMPORTANT: the opp nodes should be maintained in increasing
+ * order.
+ * @available: true/false - marks whether this OPP is available or not
+ * @rate: Frequency in hertz
+ * @u_volt: Nominal voltage in microvolts corresponding to this OPP
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct opp {
+ struct list_head node;
+
+ bool available;
+ unsigned long rate;
+ unsigned long u_volt;
+
+ struct device_opp *dev_opp;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node: list node - contains the devices with OPPs that
+ * have been registered. Nodes once added are not modified in this
+ * list.
+ * RCU usage: nodes are not modified in the list of device_opp,
+ * however addition is possible and is secured by dev_opp_list_lock
+ * @dev: device pointer
+ * @opp_list: list of opps
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users, as it is
+ * meant for bookkeeping and is private to the OPP library.
+ */
+struct device_opp {
+ struct list_head node;
+
+ struct device *dev;
+ struct list_head opp_list;
+};
+
+/*
+ * The root of the list of all devices. All device_opp structures branch off
+ * from here, with each device_opp containing the list of opps it supports in
+ * various states of availability.
+ */
+static LIST_HEAD(dev_opp_list);
+/* Lock to allow exclusive modification to the device and opp lists */
+static DEFINE_MUTEX(dev_opp_list_lock);
+
+/**
+ * find_device_opp() - find device_opp struct using device pointer
+ * @dev: device pointer used to lookup device OPPs
+ *
+ * Search the list of device OPPs for the one containing the matching device.
+ * Does an RCU reader operation to grab the pointer needed.
+ *
+ * Returns a pointer to 'struct device_opp' if found, otherwise an ERR_PTR
+ * encoding -ENODEV or -EINVAL, depending on the type of error.
+ *
+ * Locking: This function must be called under rcu_read_lock(). device_opp
+ * is an RCU-protected pointer. This means that device_opp is valid as long
+ * as we are under the RCU lock.
+ */
+static struct device_opp *find_device_opp(struct device *dev)
+{
+ struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+
+ if (unlikely(IS_ERR_OR_NULL(dev))) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
+ if (tmp_dev_opp->dev == dev) {
+ dev_opp = tmp_dev_opp;
+ break;
+ }
+ }
+
+ return dev_opp;
+}
+
+/**
+ * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * @opp: opp for which the voltage has to be returned
+ *
+ * Returns the voltage in microvolts corresponding to the opp, else
+ * returns 0.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer. This means that an opp fetched by the
+ * opp_find_freq_{exact,ceil,floor} functions is valid only as long as we are
+ * under the RCU lock. The pointer returned by the opp_find_freq family must
+ * therefore be used together with this function, prior to unlocking with
+ * rcu_read_unlock(), to maintain the integrity of the pointer.
+ */
+unsigned long opp_get_voltage(struct opp *opp)
+{
+ struct opp *tmp_opp;
+ unsigned long v = 0;
+
+ tmp_opp = rcu_dereference(opp);
+ if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ pr_err("%s: Invalid parameters\n", __func__);
+ else
+ v = tmp_opp->u_volt;
+
+ return v;
+}
+
+/**
+ * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * @opp: opp for which the frequency has to be returned
+ *
+ * Returns the frequency in hertz corresponding to the opp, else
+ * returns 0.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer. This means that an opp fetched by the
+ * opp_find_freq_{exact,ceil,floor} functions is valid only as long as we are
+ * under the RCU lock. The pointer returned by the opp_find_freq family must
+ * therefore be used together with this function, prior to unlocking with
+ * rcu_read_unlock(), to maintain the integrity of the pointer.
+ */
+unsigned long opp_get_freq(struct opp *opp)
+{
+ struct opp *tmp_opp;
+ unsigned long f = 0;
+
+ tmp_opp = rcu_dereference(opp);
+ if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ pr_err("%s: Invalid parameters\n", __func__);
+ else
+ f = tmp_opp->rate;
+
+ return f;
+}
+
+/**
+ * opp_get_opp_count() - Get number of opps available in the opp list
+ * @dev: device for which we do this operation
+ *
+ * This function returns the number of available opps if there are any,
+ * else returns 0 if none are available, or the corresponding error value
+ * if the device's OPP list cannot be found.
+ *
+ * Locking: This function must be called under rcu_read_lock(). This function
+ * internally references two RCU protected structures: device_opp and opp which
+ * are safe as long as we are under a common RCU locked section.
+ */
+int opp_get_opp_count(struct device *dev)
+{
+ struct device_opp *dev_opp;
+ struct opp *temp_opp;
+ int count = 0;
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int r = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ if (temp_opp->available)
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * opp_find_freq_exact() - search for an exact frequency
+ * @dev: device for which we do this operation
+ * @freq: frequency to search for
+ * @is_available: true/false - match for available opp
+ *
+ * Searches for an exact match in the opp list and returns a pointer to the
+ * matching opp if found, else returns an ERR_PTR value in case of error,
+ * which should be handled using IS_ERR().
+ *
+ * Note: available is a modifier for the search. If available=true, then the
+ * match is for an exact matching frequency which is available in the stored
+ * OPP table. If false, the match is for an exact frequency which is not
+ * available.
+ *
+ * This provides a mechanism to enable an opp which is not available currently
+ * or the opposite as well.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer, so the opp pointer which is returned remains valid
+ * for use with opp_get_{voltage, freq} only while under the locked area. The
+ * pointer returned must be used prior to unlocking with rcu_read_unlock() to
+ * maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
+ bool available)
+{
+ struct device_opp *dev_opp;
+ struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int r = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ if (temp_opp->available == available &&
+ temp_opp->rate == freq) {
+ opp = temp_opp;
+ break;
+ }
+ }
+
+ return opp;
+}
+
+/**
+ * opp_find_freq_ceil() - Search for a rounded ceil freq
+ * @dev: device for which we do this operation
+ * @freq: Start frequency
+ *
+ * Search for the matching ceil *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns the matching *opp and refreshes *freq accordingly, else returns an
+ * ERR_PTR value in case of error, which should be handled using IS_ERR().
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer, so the opp pointer which is returned remains valid
+ * for use with opp_get_{voltage, freq} only while under the locked area. The
+ * pointer returned must be used prior to unlocking with rcu_read_unlock() to
+ * maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+{
+ struct device_opp *dev_opp;
+ struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+ if (!dev || !freq) {
+ dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp))
+ return opp;
+
+ list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ if (temp_opp->available && temp_opp->rate >= *freq) {
+ opp = temp_opp;
+ *freq = opp->rate;
+ break;
+ }
+ }
+
+ return opp;
+}
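
The locking contract spelled out above reduces to the following pattern; a minimal sketch for a hypothetical consumer (foo_scale() and foo_set_rate_and_voltage() are assumptions):

	static int foo_scale(struct device *my_dev, unsigned long target_freq)
	{
		unsigned long freq = target_freq;
		unsigned long u_volt;
		struct opp *opp;

		rcu_read_lock();
		opp = opp_find_freq_ceil(my_dev, &freq);  /* freq rounded up */
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			return PTR_ERR(opp);
		}
		u_volt = opp_get_voltage(opp);  /* valid only in this section */
		rcu_read_unlock();

		/* freq and u_volt may now go to clk/regulator code. */
		return foo_set_rate_and_voltage(my_dev, freq, u_volt);
	}
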
+
+/**
+ * opp_find_freq_floor() - Search for a rounded floor freq
+ * @dev: device for which we do this operation
+ * @freq: Start frequency
+ *
+ * Search for the matching floor *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns the matching *opp and refreshes *freq accordingly, else returns an
+ * ERR_PTR value in case of error, which should be handled using IS_ERR().
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer, so the opp pointer which is returned remains valid
+ * for use with opp_get_{voltage, freq} only while under the locked area. The
+ * pointer returned must be used prior to unlocking with rcu_read_unlock() to
+ * maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+{
+ struct device_opp *dev_opp;
+ struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+ if (!dev || !freq) {
+ dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp))
+ return opp;
+
+ list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ if (temp_opp->available) {
+ /* go to the next node, before choosing prev */
+ if (temp_opp->rate > *freq)
+ break;
+ else
+ opp = temp_opp;
+ }
+ }
+ if (!IS_ERR(opp))
+ *freq = opp->rate;
+
+ return opp;
+}
+
+/**
+ * opp_add() - Add an OPP entry to the device's OPP table
+ * @dev: device for which we do this operation
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+ struct device_opp *dev_opp = NULL;
+ struct opp *opp, *new_opp;
+ struct list_head *head;
+
+ /* allocate new OPP node */
+ new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+ if (!new_opp) {
+ dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Check for existing list for 'dev' */
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ /*
+ * Allocate a new device OPP table. In the infrequent case
+ * where a new device needs to be added, we pay this
+ * penalty.
+ */
+ dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+ if (!dev_opp) {
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ dev_warn(dev,
+ "%s: Unable to create device OPP structure\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ dev_opp->dev = dev;
+ INIT_LIST_HEAD(&dev_opp->opp_list);
+
+ /* Secure the device list modification */
+ list_add_rcu(&dev_opp->node, &dev_opp_list);
+ }
+
+ /* populate the opp table */
+ new_opp->dev_opp = dev_opp;
+ new_opp->rate = freq;
+ new_opp->u_volt = u_volt;
+ new_opp->available = true;
+
+ /* Insert new OPP in order of increasing frequency */
+ head = &dev_opp->opp_list;
+ list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ if (new_opp->rate < opp->rate)
+ break;
+ else
+ head = &opp->node;
+ }
+
+ list_add_rcu(&new_opp->node, head);
+ mutex_unlock(&dev_opp_list_lock);
+
+ return 0;
+}
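
A minimal sketch of the intended registration flow, assuming a hypothetical foo_opp_init() called once from SoC setup code, with frequencies in Hz and voltages in microvolts as the parameters above require:

	static int __init foo_opp_init(struct device *dev)
	{
		int ret;

		ret = opp_add(dev, 300000000, 1025000);	/* 300 MHz @ 1.025 V */
		if (!ret)
			ret = opp_add(dev, 600000000, 1200000);
		if (!ret)
			ret = opp_add(dev, 800000000, 1313000);
		return ret;
	}
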
+
+/**
+ * opp_set_availability() - helper to set the availability of an opp
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to modify availability
+ * @availability_req: availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, and 0 if no modification was needed or the modification
+ * was successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+static int opp_set_availability(struct device *dev, unsigned long freq,
+ bool availability_req)
+{
+ struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+ struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* keep the node allocated */
+ new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+ if (!new_opp) {
+ dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Find the device_opp */
+ list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+ if (dev == tmp_dev_opp->dev) {
+ dev_opp = tmp_dev_opp;
+ break;
+ }
+ }
+ if (IS_ERR(dev_opp)) {
+ r = PTR_ERR(dev_opp);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ goto unlock;
+ }
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->available == availability_req)
+ goto unlock;
+ /* copy the old data over */
+ *new_opp = *opp;
+
+ /* plug in new node */
+ new_opp->available = availability_req;
+
+ list_replace_rcu(&opp->node, &new_opp->node);
+ mutex_unlock(&dev_opp_list_lock);
+ synchronize_rcu();
+
+ /* clean up old opp */
+ new_opp = opp;
+ goto out;
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+out:
+ kfree(new_opp);
+ return r;
+}
+
+/**
+ * opp_enable() - Enable a specific OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to enable
+ *
+ * Enables a provided opp. If the operation is valid, this returns 0, else the
+ * corresponding error value. It is meant to be used by users to make an OPP
+ * available again after it has been temporarily made unavailable with
+ * opp_disable.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_enable(struct device *dev, unsigned long freq)
+{
+ return opp_set_availability(dev, freq, true);
+}
+
+/**
+ * opp_disable() - Disable a specific OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to disable
+ *
+ * Disables a provided opp. If the operation is valid, this returns
+ * 0, else the corresponding error value. It is meant to be a temporary
+ * control by users to make this OPP not available until the circumstances are
+ * right to make it available again (with a call to opp_enable).
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_disable(struct device *dev, unsigned long freq)
+{
+ return opp_set_availability(dev, freq, false);
+}
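
As a sketch of the temporary-control use described above, a hypothetical thermal handler could fence off the top OPP and restore it later; per the locking notes, this must run in a context where mutexes and synchronize_rcu() are allowed (foo_thermal_event is an assumption):

	static void foo_thermal_event(struct device *dev, bool overheating)
	{
		if (overheating)
			opp_disable(dev, 800000000);	/* hide 800 MHz */
		else
			opp_enable(dev, 800000000);	/* restore it */
	}
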
+
+#ifdef CONFIG_CPU_FREQ
+/**
+ * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the OPP-modifying functions (such as opp_add(),
+ * opp_enable() or opp_disable()) have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * To simplify the logic, we pretend to be an updater and hold the relevant
+ * mutex here. Callers should ensure that this function is *NOT* called under
+ * RCU protection or in contexts where mutex locking cannot be used.
+ */
+int opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct device_opp *dev_opp;
+ struct opp *opp;
+ struct cpufreq_frequency_table *freq_table;
+ int i = 0;
+
+ /* Pretend as if I am an updater */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int r = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
+ dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ if (!freq_table) {
+ mutex_unlock(&dev_opp_list_lock);
+ dev_warn(dev, "%s: Unable to allocate frequency table\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ if (opp->available) {
+ freq_table[i].index = i;
+ freq_table[i].frequency = opp->rate / 1000;
+ i++;
+ }
+ }
+ mutex_unlock(&dev_opp_list_lock);
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+ return 0;
+}
+#endif /* CONFIG_CPU_FREQ */
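
A sketch of how a cpufreq driver might consume this helper; foo_get_mpu_device() is a hypothetical device lookup, while cpufreq_frequency_table_cpuinfo() is the usual cpufreq core helper for deriving policy limits from a table:

	static struct cpufreq_frequency_table *freq_table;

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		struct device *mpu_dev = foo_get_mpu_device();
		int ret;

		ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
		if (ret)
			return ret;

		/* Derive policy min/max from the generated table. */
		return cpufreq_frequency_table_cpuinfo(policy, freq_table);
	}
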
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..698dde742587 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
#ifdef CONFIG_PM_SLEEP
-/*
- * main.c
- */
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+/* drivers/base/power/main.c */
extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry)
@@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_init(struct device *dev)
{
+ spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
}
@@ -59,6 +60,7 @@ static inline void device_pm_move_last(struct device *dev) {}
extern int dpm_sysfs_add(struct device *);
extern void dpm_sysfs_remove(struct device *);
+extern void rpm_sysfs_remove(struct device *);
#else /* CONFIG_PM */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index f8b044e8aef7..126ca492dd08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -2,17 +2,55 @@
* drivers/base/power/runtime.c - Helper functions for device run-time PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
*
* This file is released under the GPLv2.
*/
#include <linux/sched.h>
#include <linux/pm_runtime.h>
-#include <linux/jiffies.h>
+#include "power.h"
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
-static int __pm_request_idle(struct device *dev);
-static int __pm_request_resume(struct device *dev);
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+ unsigned long now = jiffies;
+ int delta;
+
+ delta = now - dev->power.accounting_timestamp;
+
+ if (delta < 0)
+ delta = 0;
+
+ dev->power.accounting_timestamp = now;
+
+ if (dev->power.disable_depth > 0)
+ return;
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ dev->power.suspended_jiffies += delta;
+ else
+ dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+ update_pm_runtime_accounting(dev);
+ dev->power.runtime_status = status;
+}
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
@@ -40,62 +78,154 @@ static void pm_runtime_cancel_pending(struct device *dev)
dev->power.request = RPM_REQ_NONE;
}
-/**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+/**
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
*
- * This function must be called under dev->power.lock with interrupts disabled.
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time. If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
*/
-static int __pm_runtime_idle(struct device *dev)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+ int autosuspend_delay;
+ long elapsed;
+ unsigned long last_busy;
+ unsigned long expires = 0;
+
+ if (!dev->power.use_autosuspend)
+ goto out;
+
+ autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ if (autosuspend_delay < 0)
+ goto out;
+
+ last_busy = ACCESS_ONCE(dev->power.last_busy);
+ elapsed = jiffies - last_busy;
+ if (elapsed < 0)
+ goto out; /* jiffies has wrapped around. */
+
+ /*
+ * If the autosuspend_delay is >= 1 second, align the timer by rounding
+ * up to the nearest second.
+ */
+ expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+ if (autosuspend_delay >= 1000)
+ expires = round_jiffies(expires);
+ expires += !expires;
+ if (elapsed >= expires - last_busy)
+ expires = 0; /* Already expired. */
+
+ out:
+ return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
{
int retval = 0;
if (dev->power.runtime_error)
retval = -EINVAL;
- else if (dev->power.idle_notification)
- retval = -EINPROGRESS;
else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0
- || dev->power.runtime_status != RPM_ACTIVE)
+ || dev->power.disable_depth > 0)
retval = -EAGAIN;
else if (!pm_children_suspended(dev))
retval = -EBUSY;
+
+ /* Pending resume requests take precedence over suspends. */
+ else if ((dev->power.deferred_resume
+ && dev->power.runtime_status == RPM_SUSPENDING)
+ || (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME))
+ retval = -EAGAIN;
+ else if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+
+ return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another idle notification has been started earlier, return immediately. If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+ int (*callback)(struct device *);
+ int retval;
+
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval < 0)
+ ; /* Conditions are wrong. */
+
+ /* Idle notifications are allowed only in the RPM_ACTIVE state. */
+ else if (dev->power.runtime_status != RPM_ACTIVE)
+ retval = -EAGAIN;
+
+ /*
+ * Any pending request other than an idle notification takes
+ * precedence over us, except that the timer may be running.
+ */
+ else if (dev->power.request_pending &&
+ dev->power.request > RPM_REQ_IDLE)
+ retval = -EAGAIN;
+
+ /* Act as though RPM_NOWAIT is always set. */
+ else if (dev->power.idle_notification)
+ retval = -EINPROGRESS;
if (retval)
goto out;
- if (dev->power.request_pending) {
- /*
- * If an idle notification request is pending, cancel it. Any
- * other pending request takes precedence over us.
- */
- if (dev->power.request == RPM_REQ_IDLE) {
- dev->power.request = RPM_REQ_NONE;
- } else if (dev->power.request != RPM_REQ_NONE) {
- retval = -EAGAIN;
- goto out;
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ if (dev->power.no_callbacks) {
+ /* Assume ->runtime_idle() callback would have suspended. */
+ retval = rpm_suspend(dev, rpmflags);
+ goto out;
+ }
+
+ /* Carry out an asynchronous or a synchronous idle notification. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_IDLE;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
}
+ goto out;
}
dev->power.idle_notification = true;
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
- spin_unlock_irq(&dev->power.lock);
-
- dev->bus->pm->runtime_idle(dev);
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
+ callback = dev->bus->pm->runtime_idle;
+ else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
+ callback = dev->type->pm->runtime_idle;
+ else if (dev->class && dev->class->pm)
+ callback = dev->class->pm->runtime_idle;
+ else
+ callback = NULL;
- spin_lock_irq(&dev->power.lock);
- } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
+ if (callback) {
spin_unlock_irq(&dev->power.lock);
- dev->type->pm->runtime_idle(dev);
-
- spin_lock_irq(&dev->power.lock);
- } else if (dev->class && dev->class->pm
- && dev->class->pm->runtime_idle) {
- spin_unlock_irq(&dev->power.lock);
-
- dev->class->pm->runtime_idle(dev);
+ callback(dev);
spin_lock_irq(&dev->power.lock);
}
@@ -108,74 +238,99 @@ static int __pm_runtime_idle(struct device *dev)
}
/**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
*/
-int pm_runtime_idle(struct device *dev)
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
- spin_lock_irq(&dev->power.lock);
- retval = __pm_runtime_idle(dev);
+ if (!cb)
+ return -ENOSYS;
+
spin_unlock_irq(&dev->power.lock);
+ retval = cb(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
/**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
* @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
*
- * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
- * scheduled, cancel it.
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another suspend has been started earlier, either return immediately or wait
+ * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
+ * pending idle notification. If the RPM_ASYNC flag is set then queue a
+ * suspend request; otherwise run the ->runtime_suspend() callback directly.
+ * If a deferred resume was requested while the callback was running then carry
+ * it out; otherwise send an idle notification for the device (if the suspend
+ * failed) or for its parent (if the suspend succeeded).
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int rpm_suspend(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
+ int (*callback)(struct device *);
struct device *parent = NULL;
- bool notify = false;
- int retval = 0;
+ int retval;
- dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
- from_wq ? " from workqueue" : "");
+ dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
repeat:
- if (dev->power.runtime_error) {
- retval = -EINVAL;
- goto out;
- }
+ retval = rpm_check_suspend_allowed(dev);
- /* Pending resume requests take precedence over us. */
- if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ if (retval < 0)
+ ; /* Conditions are wrong. */
+
+ /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+ else if (dev->power.runtime_status == RPM_RESUMING &&
+ !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
+ if (retval)
goto out;
+
+ /* If the autosuspend_delay time hasn't expired yet, reschedule. */
+ if ((rpmflags & RPM_AUTO)
+ && dev->power.runtime_status != RPM_SUSPENDING) {
+ unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+ if (expires != 0) {
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ /*
+ * Optimization: If the timer is already running and is
+ * set to expire at or before the autosuspend delay,
+ * avoid the overhead of resetting it. Just let it
+ * expire; pm_suspend_timer_fn() will take care of the
+ * rest.
+ */
+ if (!(dev->power.timer_expires && time_before_eq(
+ dev->power.timer_expires, expires))) {
+ dev->power.timer_expires = expires;
+ mod_timer(&dev->power.suspend_timer, expires);
+ }
+ dev->power.timer_autosuspends = 1;
+ goto out;
+ }
}
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- if (dev->power.runtime_status == RPM_SUSPENDED)
- retval = 1;
- else if (dev->power.runtime_status == RPM_RESUMING
- || dev->power.disable_depth > 0
- || atomic_read(&dev->power.usage_count) > 0)
- retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
- retval = -EBUSY;
- if (retval)
- goto out;
-
if (dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
- if (from_wq) {
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
retval = -EINPROGRESS;
goto out;
}
@@ -197,46 +352,44 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
goto repeat;
}
- dev->power.runtime_status = RPM_SUSPENDING;
dev->power.deferred_resume = false;
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+ /* Carry out an asynchronous or a synchronous suspend. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = (rpmflags & RPM_AUTO) ?
+ RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ goto out;
+ }
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
- spin_unlock_irq(&dev->power.lock);
-
- retval = dev->bus->pm->runtime_suspend(dev);
-
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else if (dev->type && dev->type->pm
- && dev->type->pm->runtime_suspend) {
- spin_unlock_irq(&dev->power.lock);
-
- retval = dev->type->pm->runtime_suspend(dev);
-
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else if (dev->class && dev->class->pm
- && dev->class->pm->runtime_suspend) {
- spin_unlock_irq(&dev->power.lock);
-
- retval = dev->class->pm->runtime_suspend(dev);
+ __update_runtime_status(dev, RPM_SUSPENDING);
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else {
- retval = -ENOSYS;
- }
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+ callback = dev->bus->pm->runtime_suspend;
+ else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
+ callback = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ callback = dev->class->pm->runtime_suspend;
+ else
+ callback = NULL;
+ retval = rpm_callback(callback, dev);
if (retval) {
- dev->power.runtime_status = RPM_ACTIVE;
- pm_runtime_cancel_pending(dev);
-
- if (retval == -EAGAIN || retval == -EBUSY) {
- notify = true;
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.deferred_resume = 0;
+ if (retval == -EAGAIN || retval == -EBUSY)
dev->power.runtime_error = 0;
- }
+ else
+ pm_runtime_cancel_pending(dev);
} else {
- dev->power.runtime_status = RPM_SUSPENDED;
+ no_callback:
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
@@ -246,14 +399,11 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
- __pm_runtime_resume(dev, false);
+ rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
}
- if (notify)
- __pm_runtime_idle(dev);
-
if (parent && !parent->power.ignore_children) {
spin_unlock_irq(&dev->power.lock);
@@ -263,72 +413,69 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
}
out:
- dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
-
- return retval;
-}
-
-/**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
- int retval;
-
- spin_lock_irq(&dev->power.lock);
- retval = __pm_runtime_suspend(dev, false);
- spin_unlock_irq(&dev->power.lock);
+ dev_dbg(dev, "%s returns %d\n", __func__, retval);
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
* @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
*
- * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * any scheduled or pending requests. If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
+ * parallel with this function, either tell the other process to resume after
+ * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
+ * flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly. Queue an idle notification for the
+ * device if the resume succeeded.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int rpm_resume(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
+ int (*callback)(struct device *);
struct device *parent = NULL;
int retval = 0;
- dev_dbg(dev, "__pm_runtime_resume()%s!\n",
- from_wq ? " from workqueue" : "");
+ dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
repeat:
- if (dev->power.runtime_error) {
+ if (dev->power.runtime_error)
retval = -EINVAL;
+ else if (dev->power.disable_depth > 0)
+ retval = -EAGAIN;
+ if (retval)
goto out;
- }
- pm_runtime_cancel_pending(dev);
+ /*
+ * Other scheduled or pending requests need to be canceled. Small
+ * optimization: If an autosuspend timer is running, leave it running
+ * rather than cancelling it now only to restart it again in the near
+ * future.
+ */
+ dev->power.request = RPM_REQ_NONE;
+ if (!dev->power.timer_autosuspends)
+ pm_runtime_deactivate_timer(dev);
- if (dev->power.runtime_status == RPM_ACTIVE)
+ if (dev->power.runtime_status == RPM_ACTIVE) {
retval = 1;
- else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
- if (retval)
goto out;
+ }
if (dev->power.runtime_status == RPM_RESUMING
|| dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
- if (from_wq) {
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
if (dev->power.runtime_status == RPM_SUSPENDING)
dev->power.deferred_resume = true;
- retval = -EINPROGRESS;
+ else
+ retval = -EINPROGRESS;
goto out;
}
@@ -350,6 +497,34 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
goto repeat;
}
+ /*
+ * See if we can skip waking up the parent. This is safe only if
+ * power.no_callbacks is set, because otherwise we don't know whether
+ * the resume will actually succeed.
+ */
+ if (dev->power.no_callbacks && !parent && dev->parent) {
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+ if (dev->parent->power.disable_depth > 0
+ || dev->parent->power.ignore_children
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+ }
+
+ /* Carry out an asynchronous or a synchronous resume. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_RESUME;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ retval = 0;
+ goto out;
+ }
+
if (!parent && dev->parent) {
/*
* Increment the parent's resume counter and resume it if
@@ -367,7 +542,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children) {
- __pm_runtime_resume(parent, false);
+ rpm_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
}
@@ -379,47 +554,34 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
goto repeat;
}
- dev->power.runtime_status = RPM_RESUMING;
-
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
- spin_unlock_irq(&dev->power.lock);
-
- retval = dev->bus->pm->runtime_resume(dev);
-
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else if (dev->type && dev->type->pm
- && dev->type->pm->runtime_resume) {
- spin_unlock_irq(&dev->power.lock);
-
- retval = dev->type->pm->runtime_resume(dev);
-
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else if (dev->class && dev->class->pm
- && dev->class->pm->runtime_resume) {
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
- retval = dev->class->pm->runtime_resume(dev);
+ __update_runtime_status(dev, RPM_RESUMING);
- spin_lock_irq(&dev->power.lock);
- dev->power.runtime_error = retval;
- } else {
- retval = -ENOSYS;
- }
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+ callback = dev->bus->pm->runtime_resume;
+ else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
+ callback = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ callback = dev->class->pm->runtime_resume;
+ else
+ callback = NULL;
+ retval = rpm_callback(callback, dev);
if (retval) {
- dev->power.runtime_status = RPM_SUSPENDED;
+ __update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
} else {
- dev->power.runtime_status = RPM_ACTIVE;
+ no_callback:
+ __update_runtime_status(dev, RPM_ACTIVE);
if (parent)
atomic_inc(&parent->power.child_count);
}
wake_up_all(&dev->power.wait_queue);
if (!retval)
- __pm_request_idle(dev);
+ rpm_idle(dev, RPM_ASYNC);
out:
if (parent) {
@@ -430,26 +592,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
spin_lock_irq(&dev->power.lock);
}
- dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
-
- return retval;
-}
-
-/**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
- int retval;
-
- spin_lock_irq(&dev->power.lock);
- retval = __pm_runtime_resume(dev, false);
- spin_unlock_irq(&dev->power.lock);
+ dev_dbg(dev, "%s returns %d\n", __func__, retval);
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
/**
* pm_runtime_work - Universal run-time PM work function.
@@ -476,13 +622,16 @@ static void pm_runtime_work(struct work_struct *work)
case RPM_REQ_NONE:
break;
case RPM_REQ_IDLE:
- __pm_runtime_idle(dev);
+ rpm_idle(dev, RPM_NOWAIT);
break;
case RPM_REQ_SUSPEND:
- __pm_runtime_suspend(dev, true);
+ rpm_suspend(dev, RPM_NOWAIT);
+ break;
+ case RPM_REQ_AUTOSUSPEND:
+ rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
break;
case RPM_REQ_RESUME:
- __pm_runtime_resume(dev, true);
+ rpm_resume(dev, RPM_NOWAIT);
break;
}
@@ -491,117 +640,10 @@ static void pm_runtime_work(struct work_struct *work)
}
/**
- * __pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- *
- * Check if the device's run-time PM status is correct for suspending the device
- * and queue up a request to run __pm_runtime_idle() for it.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_idle(struct device *dev)
-{
- int retval = 0;
-
- if (dev->power.runtime_error)
- retval = -EINVAL;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0
- || dev->power.runtime_status == RPM_SUSPENDED
- || dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
- retval = -EBUSY;
- if (retval)
- return retval;
-
- if (dev->power.request_pending) {
- /* Any requests other then RPM_REQ_IDLE take precedence. */
- if (dev->power.request == RPM_REQ_NONE)
- dev->power.request = RPM_REQ_IDLE;
- else if (dev->power.request != RPM_REQ_IDLE)
- retval = -EAGAIN;
- return retval;
- }
-
- dev->power.request = RPM_REQ_IDLE;
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
-
- return retval;
-}
-
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&dev->power.lock, flags);
- retval = __pm_request_idle(dev);
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
-
-/**
- * __pm_request_suspend - Submit a suspend request for given device.
- * @dev: Device to suspend.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_suspend(struct device *dev)
-{
- int retval = 0;
-
- if (dev->power.runtime_error)
- return -EINVAL;
-
- if (dev->power.runtime_status == RPM_SUSPENDED)
- retval = 1;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
- retval = -EAGAIN;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
- else if (!pm_children_suspended(dev))
- retval = -EBUSY;
- if (retval < 0)
- return retval;
-
- pm_runtime_deactivate_timer(dev);
-
- if (dev->power.request_pending) {
- /*
- * Pending resume requests take precedence over us, but we can
- * overtake any other pending request.
- */
- if (dev->power.request == RPM_REQ_RESUME)
- retval = -EAGAIN;
- else if (dev->power.request != RPM_REQ_SUSPEND)
- dev->power.request = retval ?
- RPM_REQ_NONE : RPM_REQ_SUSPEND;
- return retval;
- } else if (retval) {
- return retval;
- }
-
- dev->power.request = RPM_REQ_SUSPEND;
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
-
- return 0;
-}
-
-/**
* pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
* @data: Device pointer passed by pm_schedule_suspend().
*
- * Check if the time is right and execute __pm_request_suspend() in that case.
+ * Check if the time is right and queue a suspend request.
*/
static void pm_suspend_timer_fn(unsigned long data)
{
@@ -615,7 +657,8 @@ static void pm_suspend_timer_fn(unsigned long data)
/* If 'expire' is after 'jiffies' we've been called too early. */
if (expires > 0 && !time_after(expires, jiffies)) {
dev->power.timer_expires = 0;
- __pm_request_suspend(dev);
+ rpm_suspend(dev, dev->power.timer_autosuspends ?
+ (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -629,49 +672,25 @@ static void pm_suspend_timer_fn(unsigned long data)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
- int retval = 0;
+ int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.runtime_error) {
- retval = -EINVAL;
- goto out;
- }
-
if (!delay) {
- retval = __pm_request_suspend(dev);
+ retval = rpm_suspend(dev, RPM_ASYNC);
goto out;
}
- pm_runtime_deactivate_timer(dev);
-
- if (dev->power.request_pending) {
- /*
- * Pending resume requests take precedence over us, but any
- * other pending requests have to be canceled.
- */
- if (dev->power.request == RPM_REQ_RESUME) {
- retval = -EAGAIN;
- goto out;
- }
- dev->power.request = RPM_REQ_NONE;
- }
-
- if (dev->power.runtime_status == RPM_SUSPENDED)
- retval = 1;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
- retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
- retval = -EBUSY;
+ retval = rpm_check_suspend_allowed(dev);
if (retval)
goto out;
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
- if (!dev->power.timer_expires)
- dev->power.timer_expires = 1;
+ dev->power.timer_expires += !dev->power.timer_expires;
+ dev->power.timer_autosuspends = 0;
mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
out:
@@ -682,103 +701,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
*
- * This function must be called under dev->power.lock with interrupts disabled.
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
*/
-static int __pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
{
- int retval = 0;
-
- if (dev->power.runtime_error)
- return -EINVAL;
-
- if (dev->power.runtime_status == RPM_ACTIVE)
- retval = 1;
- else if (dev->power.runtime_status == RPM_RESUMING)
- retval = -EINPROGRESS;
- else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
- if (retval < 0)
- return retval;
-
- pm_runtime_deactivate_timer(dev);
+ unsigned long flags;
+ int retval;
- if (dev->power.runtime_status == RPM_SUSPENDING) {
- dev->power.deferred_resume = true;
- return retval;
+ if (rpmflags & RPM_GET_PUT) {
+ if (!atomic_dec_and_test(&dev->power.usage_count))
+ return 0;
}
- if (dev->power.request_pending) {
- /* If non-resume request is pending, we can overtake it. */
- dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
- return retval;
- }
- if (retval)
- return retval;
- dev->power.request = RPM_REQ_RESUME;
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_idle(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
*/
-int pm_request_resume(struct device *dev)
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ if (rpmflags & RPM_GET_PUT) {
+ if (!atomic_dec_and_test(&dev->power.usage_count))
+ return 0;
+ }
+
spin_lock_irqsave(&dev->power.lock, flags);
- retval = __pm_request_resume(dev);
+ retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
- * @sync: If set and the device is suspended, resume it synchronously.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
*
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the value of @sync.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
*/
-int __pm_runtime_get(struct device *dev, bool sync)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
{
+ unsigned long flags;
int retval;
- atomic_inc(&dev->power.usage_count);
- retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
+ if (rpmflags & RPM_GET_PUT)
+ atomic_inc(&dev->power.usage_count);
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @sync: If the device's bus type is to be notified, do that synchronously.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the value of @sync.
- */
-int __pm_runtime_put(struct device *dev, bool sync)
-{
- int retval = 0;
-
- if (atomic_dec_and_test(&dev->power.usage_count))
- retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_resume(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
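
With the three entry points above taking over from the old per-operation exports, the familiar helpers become thin inlines over flag combinations; the mapping, as typically provided by the pm_runtime.h wrappers, looks like:

	pm_runtime_get_sync(dev);  /* __pm_runtime_resume(dev, RPM_GET_PUT) */
	pm_runtime_get(dev);       /* __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC) */
	pm_runtime_put_sync(dev);  /* __pm_runtime_idle(dev, RPM_GET_PUT) */
	pm_runtime_put(dev);       /* __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) */
	pm_runtime_suspend(dev);   /* __pm_runtime_suspend(dev, 0) */
	pm_request_resume(dev);    /* __pm_runtime_resume(dev, RPM_ASYNC) */
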
/**
* __pm_runtime_set_status - Set run-time PM status of a device.
@@ -848,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
}
out_set:
- dev->power.runtime_status = status;
+ __update_runtime_status(dev, status);
dev->power.runtime_error = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -929,7 +933,7 @@ int pm_runtime_barrier(struct device *dev)
if (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME) {
- __pm_runtime_resume(dev, false);
+ rpm_resume(dev, 0);
retval = 1;
}
@@ -978,7 +982,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
*/
pm_runtime_get_noresume(dev);
- __pm_runtime_resume(dev, false);
+ rpm_resume(dev, 0);
pm_runtime_put_noidle(dev);
}
@@ -1011,13 +1015,154 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
+ * pm_runtime_forbid - Block run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (!dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = false;
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = true;
+ if (atomic_dec_and_test(&dev->power.usage_count))
+ rpm_idle(dev, RPM_AUTO);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
+ * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no run-time PM
+ * callbacks of its own. The run-time sysfs attributes will be removed.
+ *
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.no_callbacks = 1;
+ spin_unlock_irq(&dev->power.lock);
+ if (device_is_registered(dev))
+ rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it. Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+ int delay = dev->power.autosuspend_delay;
+
+ /* Should runtime suspend be prevented now? */
+ if (dev->power.use_autosuspend && delay < 0) {
+
+ /* If it used to be allowed then prevent it. */
+ if (!old_use || old_delay >= 0) {
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+ }
+ }
+
+ /* Runtime suspend should be allowed now. */
+ else {
+
+ /* If it used to be prevented then allow it. */
+ if (old_use && old_delay < 0)
+ atomic_dec(&dev->power.usage_count);
+
+ /* Maybe we can autosuspend now. */
+ rpm_idle(dev, RPM_AUTO);
+ }
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value. If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
+ * changes the other way, allow run-time suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.autosuspend_delay = delay;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.use_autosuspend = use;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
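Together these two setters give drivers an opt-in autosuspend flow. A
driver-side sketch, assuming the pm_runtime_use_autosuspend(),
pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() helpers that
accompany this interface:

	static int my_probe(struct platform_device *pdev)
	{
		/* Suspend only after the device has been idle for 2 s. */
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static void my_io_done(struct device *dev)
	{
		/* Refresh the idle timestamp, then drop the usage count;
		 * the core autosuspends once the delay has expired. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}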
+
+/**
* pm_runtime_init - Initialize run-time PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
{
- spin_lock_init(&dev->power.lock);
-
dev->power.runtime_status = RPM_SUSPENDED;
dev->power.idle_notification = false;
@@ -1028,10 +1173,12 @@ void pm_runtime_init(struct device *dev)
atomic_set(&dev->power.child_count, 0);
pm_suspend_ignore_children(dev, false);
+ dev->power.runtime_auto = true;
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
+ dev->power.accounting_timestamp = jiffies;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..0b1e46bf3e56 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -4,9 +4,27 @@
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
+#include <asm/atomic.h>
+#include <linux/jiffies.h>
#include "power.h"
/*
+ * control - Report/change current runtime PM setting of the device
+ *
+ * Runtime power management of a device can be blocked with the help of
+ * this attribute. All devices have one of the following two values for
+ * the power/control file:
+ *
+ * + "auto\n" to allow the device to be power managed at run time;
+ * + "on\n" to prevent the device from being power managed at run time;
+ *
+ * The default for all devices is "auto", which means that devices may be
+ * subject to automatic power management, depending on their drivers.
+ * Changing this attribute to "on" prevents the driver from power managing
+ * the device at run time. Doing that while the device is suspended causes
+ * it to be woken up.
+ *
* wakeup - Report/change current wakeup option for device
*
* Some devices support "wakeup" events, which are hardware signals
@@ -38,11 +56,164 @@
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * Asynchronous suspend and resume of the device during system-wide power
+ * state transitions can be enabled by writing "enabled" to this file.
+ * Analogously, if "disabled" is written to this file, the device will be
+ * suspended and resumed synchronously.
+ *
+ * All devices have one of the following two values for power/async:
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ * + "disabled\n" to forbid it;
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * that cases it should be safe to leave the default value.
+ *
+ * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
+ *
+ * Some drivers don't want to carry out a runtime suspend as soon as a
+ * device becomes idle; they want the device to remain idle for some minimum
+ * period of time before it is suspended. This period is the autosuspend_delay
+ * value (expressed in milliseconds) and it can be controlled by the user.
+ * If the value is negative then the device will never be runtime
+ * suspended.
+ *
+ * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
+ * value are used only if the driver calls pm_runtime_use_autosuspend().
+ *
+ * wakeup_count - Report the number of wakeup events related to the device
*/
static const char enabled[] = "enabled";
static const char disabled[] = "disabled";
+const char power_group_name[] = "power";
+EXPORT_SYMBOL_GPL(power_group_name);
+
+#ifdef CONFIG_PM_RUNTIME
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ pm_runtime_allow(dev);
+ else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ pm_runtime_forbid(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(control, 0644, control_show, control_store);
+
+static ssize_t rtpm_active_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ spin_lock_irq(&dev->power.lock);
+ update_pm_runtime_accounting(dev);
+ ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+ spin_unlock_irq(&dev->power.lock);
+ return ret;
+}
+
+static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+
+static ssize_t rtpm_suspended_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ spin_lock_irq(&dev->power.lock);
+ update_pm_runtime_accounting(dev);
+ ret = sprintf(buf, "%i\n",
+ jiffies_to_msecs(dev->power.suspended_jiffies));
+ spin_unlock_irq(&dev->power.lock);
+ return ret;
+}
+
+static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+
+static ssize_t rtpm_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *p;
+
+ if (dev->power.runtime_error) {
+ p = "error\n";
+ } else if (dev->power.disable_depth) {
+ p = "unsupported\n";
+ } else {
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ p = "suspended\n";
+ break;
+ case RPM_SUSPENDING:
+ p = "suspending\n";
+ break;
+ case RPM_RESUMING:
+ p = "resuming\n";
+ break;
+ case RPM_ACTIVE:
+ p = "active\n";
+ break;
+ default:
+ return -EIO;
+ }
+ }
+	return sprintf(buf, "%s", p);
+}
+
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+
+static ssize_t autosuspend_delay_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+ return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
+}
+
+static ssize_t autosuspend_delay_ms_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ long delay;
+
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+
+ if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
+ return -EINVAL;
+
+ pm_runtime_set_autosuspend_delay(dev, delay);
+ return n;
+}
+
+static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
+ autosuspend_delay_ms_store);
+
+#endif
+
static ssize_t
wake_show(struct device * dev, struct device_attribute *attr, char * buf)
{
@@ -77,16 +248,259 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+#ifdef CONFIG_PM_SLEEP
+static ssize_t wakeup_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->event_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+
+static ssize_t wakeup_active_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->active_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+
+static ssize_t wakeup_hit_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->hit_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
+
+static ssize_t wakeup_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int active = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ active = dev->power.wakeup->active;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+
+static ssize_t wakeup_total_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->total_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+
+static ssize_t wakeup_max_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->max_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+
+static ssize_t wakeup_last_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->last_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	if (dev->power.disable_depth && !dev->power.runtime_auto)
+ return sprintf(buf, "disabled & forbidden\n");
+ else if (dev->power.disable_depth)
+ return sprintf(buf, "disabled\n");
+	else if (!dev->power.runtime_auto)
+ return sprintf(buf, "forbidden\n");
+ return sprintf(buf, "enabled\n");
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ device_async_suspend_enabled(dev) ? enabled : disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ device_enable_async_suspend(dev);
+ else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ device_disable_async_suspend(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(async, 0644, async_show, async_store);
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
+#endif
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
+#endif
NULL,
};
static struct attribute_group pm_attr_group = {
- .name = "power",
+ .name = power_group_name,
.attrs = power_attrs,
};
+#ifdef CONFIG_PM_RUNTIME
+
+static struct attribute *runtime_attrs[] = {
+#ifndef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_status.attr,
+#endif
+ &dev_attr_control.attr,
+ &dev_attr_runtime_suspended_time.attr,
+ &dev_attr_runtime_active_time.attr,
+ &dev_attr_autosuspend_delay_ms.attr,
+ NULL,
+};
+static struct attribute_group pm_runtime_attr_group = {
+ .name = power_group_name,
+ .attrs = runtime_attrs,
+};
+
+int dpm_sysfs_add(struct device *dev)
+{
+ int rc;
+
+ rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
+ if (rc == 0 && !dev->power.no_callbacks) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
+ if (rc)
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ }
+ return rc;
+}
+
+void rpm_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+}
+
+void dpm_sysfs_remove(struct device *dev)
+{
+ rpm_sysfs_remove(dev);
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
int dpm_sysfs_add(struct device * dev)
{
return sysfs_create_group(&dev->kobj, &pm_attr_group);
@@ -96,3 +510,5 @@ void dpm_sysfs_remove(struct device * dev)
{
sysfs_remove_group(&dev->kobj, &pm_attr_group);
}
+
+#endif
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 0a1a2c4dbc6e..9f4258df4cfd 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,8 +188,10 @@ static int show_file_hash(unsigned int value)
static int show_dev_hash(unsigned int value)
{
int match = 0;
- struct list_head *entry = dpm_list.prev;
+ struct list_head *entry;
+ device_pm_lock();
+ entry = dpm_list.prev;
while (entry != &dpm_list) {
struct device * dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
@@ -199,11 +201,43 @@ static int show_dev_hash(unsigned int value)
}
entry = entry->prev;
}
+ device_pm_unlock();
return match;
}
static unsigned int hash_value_early_read;
+int show_trace_dev_match(char *buf, size_t size)
+{
+ unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
+ int ret = 0;
+ struct list_head *entry;
+
+ /*
+ * It's possible that multiple devices will match the hash and we can't
+ * tell which is the culprit, so it's best to output them all.
+ */
+ device_pm_lock();
+ entry = dpm_list.prev;
+ while (size && entry != &dpm_list) {
+ struct device *dev = to_device(entry);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev),
+ DEVHASH);
+ if (hash == value) {
+ int len = snprintf(buf, size, "%s\n",
+ dev_driver_string(dev));
+ if (len > size)
+ len = size;
+ buf += len;
+ ret += len;
+ size -= len;
+ }
+ entry = entry->prev;
+ }
+ device_pm_unlock();
+ return ret;
+}
+
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000000..71c5528e1c35
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,704 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "power.h"
+
+#define TIMEOUT 100
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled;
+
+/* The counter of registered wakeup events. */
+static atomic_t event_count = ATOMIC_INIT(0);
+/* A preserved old value of event_count. */
+static unsigned int saved_count;
+/* The counter of wakeup events being processed. */
+static atomic_t events_in_progress = ATOMIC_INIT(0);
+
+static DEFINE_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(unsigned long data);
+
+static LIST_HEAD(wakeup_sources);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+ struct wakeup_source *ws;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return NULL;
+
+ spin_lock_init(&ws->lock);
+ if (name)
+ ws->name = kstrdup(name, GFP_KERNEL);
+
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ spin_lock_irq(&ws->lock);
+ while (ws->active) {
+ spin_unlock_irq(&ws->lock);
+
+ schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+
+ spin_lock_irq(&ws->lock);
+ }
+ spin_unlock_irq(&ws->lock);
+
+ kfree(ws->name);
+ kfree(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+ if (WARN_ON(!ws))
+ return;
+
+ setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+ ws->active = false;
+
+ spin_lock_irq(&events_lock);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+ spin_unlock_irq(&events_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+ if (WARN_ON(!ws))
+ return;
+
+ spin_lock_irq(&events_lock);
+ list_del_rcu(&ws->entry);
+ spin_unlock_irq(&events_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(const char *name)
+{
+ struct wakeup_source *ws;
+
+ ws = wakeup_source_create(name);
+ if (ws)
+ wakeup_source_add(ws);
+
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+ wakeup_source_remove(ws);
+ wakeup_source_destroy(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
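The intended lifecycle for a standalone wakeup source, sketched with
illustrative names:

	static struct wakeup_source *my_ws;

	static int __init my_init(void)
	{
		my_ws = wakeup_source_register("my_event_source");
		return my_ws ? 0 : -ENOMEM;
	}

	static void __exit my_exit(void)
	{
		/* Removes the source from the list, then blocks in
		 * wakeup_source_destroy() until it goes inactive. */
		wakeup_source_unregister(my_ws);
	}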
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ spin_unlock_irq(&dev->power.lock);
+ return -EEXIST;
+ }
+ dev->power.wakeup = ws;
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = wakeup_source_register(dev_name(dev));
+ if (!ws)
+ return -ENOMEM;
+
+ ret = device_wakeup_attach(dev, ws);
+ if (ret)
+ wakeup_source_unregister(ws);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ spin_lock_irq(&dev->power.lock);
+ ws = dev->power.wakeup;
+ dev->power.wakeup = NULL;
+ spin_unlock_irq(&dev->power.lock);
+ return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = device_wakeup_detach(dev);
+ if (ws)
+ wakeup_source_unregister(ws);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc.
+ */
+int device_init_wakeup(struct device *dev, bool enable)
+{
+ int ret = 0;
+
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ ret = device_wakeup_enable(dev);
+ } else {
+ device_set_wakeup_capable(dev, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_init_wakeup);
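Typical use is a single call from a driver's probe path; a sketch with a
hypothetical platform driver:

	static int my_button_probe(struct platform_device *pdev)
	{
		/* ... hardware setup ... */

		/* Mark the device wakeup-capable and enable it as a
		 * wakeup source; pass false on remove to undo this. */
		return device_init_wakeup(&pdev->dev, true);
	}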
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended. The moment this period
+ * ends depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing. In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one. In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing. This
+ * knowledge, however, may not be available to it, so it can simply specify a
+ * time to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
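The two patterns above map onto driver code roughly as follows
(hypothetical interrupt handlers; both calls are safe in IRQ context):

	/* Pattern 1: the detecting code also completes the processing. */
	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct device *dev = data;

		pm_stay_awake(dev);	/* open the "no suspend" period */
		/* ... handle the event ... */
		pm_relax(dev);		/* close it explicitly */
		return IRQ_HANDLED;
	}

	/* Pattern 2: processing continues elsewhere; give it a budget. */
	static irqreturn_t my_fwd_handler(int irq, void *data)
	{
		struct device *dev = data;

		pm_wakeup_event(dev, 500);	/* stay awake ~500 ms */
		return IRQ_HANDLED;
	}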
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+ ws->active = true;
+ ws->active_count++;
+ ws->timer_expires = jiffies;
+ ws->last_time = ktime_get();
+
+ atomic_inc(&events_in_progress);
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ ws->event_count++;
+ if (!ws->active)
+ wakeup_source_activate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake() for the @dev's wakeup source object.
+ *
+ * Call this function after detecting a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_stay_awake(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+ ktime_t duration;
+ ktime_t now;
+
+ ws->relax_count++;
+ /*
+ * __pm_relax() may be called directly or from a timer function.
+ * If it is called directly right after the timer function has been
+ * started, but before the timer function calls __pm_relax(), it is
+ * possible that __pm_stay_awake() will be called in the meantime and
+ * will set ws->active. Then, ws->active may be cleared immediately
+ * by the __pm_relax() called from the timer function, but in such a
+ * case ws->relax_count will be different from ws->active_count.
+ */
+ if (ws->relax_count != ws->active_count) {
+ ws->relax_count--;
+ return;
+ }
+
+ ws->active = false;
+
+ now = ktime_get();
+ duration = ktime_sub(now, ws->last_time);
+ ws->total_time = ktime_add(ws->total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+ ws->max_time = duration;
+
+ del_timer(&ws->timer);
+
+ /*
+ * event_count has to be incremented before events_in_progress is
+ * modified, so that the callers of pm_check_wakeup_events() and
+ * pm_save_wakeup_count() don't see the old value of event_count and
+ * events_in_progress equal to zero at the same time.
+ */
+ atomic_inc(&event_count);
+ smp_mb__before_atomic_dec();
+ atomic_dec(&events_in_progress);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * __pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void __pm_relax(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ wakeup_source_deactivate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_relax(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @data: Address of the wakeup source object associated with the event source.
+ *
+ * Call __pm_relax() for the wakeup source whose address is stored in @data.
+ */
+static void pm_wakeup_timer_fn(unsigned long data)
+{
+ __pm_relax((struct wakeup_source *)data);
+}
+
+/**
+ * __pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel. If @ws is
+ * not active, activate it. If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in the future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+ unsigned long flags;
+ unsigned long expires;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ ws->event_count++;
+ if (!ws->active)
+ wakeup_source_activate(ws);
+
+ if (!msec) {
+ wakeup_source_deactivate(ws);
+ goto unlock;
+ }
+
+ expires = jiffies + msecs_to_jiffies(msec);
+ if (!expires)
+ expires = 1;
+
+ if (time_after(expires, ws->timer_expires)) {
+ mod_timer(&ws->timer, expires);
+ ws->timer_expires = expires;
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_wakeup_event);
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Call __pm_wakeup_event() for the @dev's wakeup source object.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_wakeup_event(dev->power.wakeup, msec);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_event);
+
+/**
+ * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
+ */
+static void pm_wakeup_update_hit_counts(void)
+{
+ unsigned long flags;
+ struct wakeup_source *ws;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ ws->hit_count++;
+ spin_unlock_irqrestore(&ws->lock, flags);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * pm_check_wakeup_events - Check for new wakeup events.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past to check if new wakeup events have been registered since
+ * the old value was stored. Check if the current number of wakeup events being
+ * processed is zero.
+ */
+bool pm_check_wakeup_events(void)
+{
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&events_lock, flags);
+ if (events_check_enabled) {
+ ret = ((unsigned int)atomic_read(&event_count) == saved_count)
+ && !atomic_read(&events_in_progress);
+ events_check_enabled = ret;
+ }
+ spin_unlock_irqrestore(&events_lock, flags);
+ if (!ret)
+ pm_wakeup_update_hit_counts();
+ return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ *
+ * Store the number of registered wakeup events at the address in @count. Block
+ * if the current number of wakeup events being processed is nonzero.
+ *
+ * Return false if the wait for the number of wakeup events being processed to
+ * drop down to zero has been interrupted by a signal (and the current number
+ * of wakeup events being processed is still nonzero). Otherwise return true.
+ */
+bool pm_get_wakeup_count(unsigned int *count)
+{
+ bool ret;
+
+ if (capable(CAP_SYS_ADMIN))
+ events_check_enabled = false;
+
+ while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+ pm_wakeup_update_hit_counts();
+ schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+ }
+
+ ret = !atomic_read(&events_in_progress);
+ *count = atomic_read(&event_count);
+ return ret;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events to be used by pm_check_wakeup_events()
+ * and return true. Otherwise return false.
+ */
+bool pm_save_wakeup_count(unsigned int count)
+{
+ bool ret = false;
+
+ spin_lock_irq(&events_lock);
+ if (count == (unsigned int)atomic_read(&event_count)
+ && !atomic_read(&events_in_progress)) {
+ saved_count = count;
+ events_check_enabled = true;
+ ret = true;
+ }
+ spin_unlock_irq(&events_lock);
+ if (!ret)
+ pm_wakeup_update_hit_counts();
+ return ret;
+}
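These two helpers back the read and write sides of the wakeup-count
handshake exposed to userspace (via /sys/power/wakeup_count, assuming the
standard interface): read the count, write it back, and only then suspend.
A hedged userspace sketch:

	#include <stdio.h>

	/* Returns 0 on success, -1 if wakeup events raced with us. */
	int suspend_if_no_wakeups(void)
	{
		unsigned int count;
		FILE *f = fopen("/sys/power/wakeup_count", "r+");

		if (!f)
			return -1;
		if (fscanf(f, "%u", &count) != 1) {
			fclose(f);
			return -1;
		}
		rewind(f);
		/* The write fails if new wakeup events were registered. */
		if (fprintf(f, "%u", count) < 0 || fflush(f) != 0) {
			fclose(f);
			return -1;
		}
		fclose(f);

		f = fopen("/sys/power/state", "w");
		if (!f)
			return -1;
		fputs("mem", f);
		return fclose(f) ? -1 : 0;
	}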
+
+static struct dentry *wakeup_sources_stats_dentry;
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+ struct wakeup_source *ws)
+{
+ unsigned long flags;
+ ktime_t total_time;
+ ktime_t max_time;
+ unsigned long active_count;
+ ktime_t active_time;
+ int ret;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ total_time = ws->total_time;
+ max_time = ws->max_time;
+ active_count = ws->active_count;
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ if (active_time.tv64 > max_time.tv64)
+ max_time = active_time;
+ } else {
+ active_time = ktime_set(0, 0);
+ }
+
+ ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
+ "%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count, ws->hit_count,
+ ktime_to_ms(active_time), ktime_to_ms(total_time),
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+
+ return ret;
+}
+
+/**
+ * wakeup_sources_stats_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ */
+static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
+{
+ struct wakeup_source *ws;
+
+ seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
+ "active_since\ttotal_time\tmax_time\tlast_change\n");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ print_wakeup_source_stats(m, ws);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wakeup_sources_stats_show, NULL);
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = wakeup_sources_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+ wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
+ S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+ return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 0d903909af7e..1667aaf4fde6 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/device.h>
@@ -54,7 +53,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
}
-static struct sysfs_ops sysfs_ops = {
+static const struct sysfs_ops sysfs_ops = {
.show = sysdev_show,
.store = sysdev_store,
};
@@ -89,7 +88,7 @@ static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr,
struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
if (class_attr->show)
- return class_attr->show(class, buffer);
+ return class_attr->show(class, class_attr, buffer);
return -EIO;
}
@@ -100,11 +99,11 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
if (class_attr->store)
- return class_attr->store(class, buffer, count);
+ return class_attr->store(class, class_attr, buffer, count);
return -EIO;
}
-static struct sysfs_ops sysfs_class_ops = {
+static const struct sysfs_ops sysfs_class_ops = {
.show = sysdev_class_show,
.store = sysdev_class_store,
};
@@ -145,13 +144,20 @@ int sysdev_class_register(struct sysdev_class *cls)
if (retval)
return retval;
- return kset_register(&cls->kset);
+ retval = kset_register(&cls->kset);
+ if (!retval && cls->attrs)
+ retval = sysfs_create_files(&cls->kset.kobj,
+ (const struct attribute **)cls->attrs);
+ return retval;
}
void sysdev_class_unregister(struct sysdev_class *cls)
{
pr_debug("Unregistering sysdev class '%s'\n",
kobject_name(&cls->kset.kobj));
+ if (cls->attrs)
+ sysfs_remove_files(&cls->kset.kobj,
+ (const struct attribute **)cls->attrs);
kset_unregister(&cls->kset);
}
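With the attrs hook in place, a subsystem can declare class-wide attributes
declaratively. A sketch with illustrative names, assuming the
SYSDEV_CLASS_ATTR() helper matching the updated show/store signatures:

	static ssize_t demo_show(struct sysdev_class *cls,
				 struct sysdev_class_attribute *attr,
				 char *buf)
	{
		return sprintf(buf, "demo\n");
	}
	static SYSDEV_CLASS_ATTR(demo, 0444, demo_show, NULL);

	static struct sysdev_class_attribute *demo_attrs[] = {
		&attr_demo,
		NULL
	};

	static struct sysdev_class demo_class = {
		.name	= "demo",
		.attrs	= demo_attrs,
	};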
@@ -426,13 +432,13 @@ int sysdev_suspend(pm_message_t state)
/* resume current sysdev */
cls_driver:
drv = NULL;
- printk(KERN_ERR "Class suspend failed for %s\n",
- kobject_name(&sysdev->kobj));
+ printk(KERN_ERR "Class suspend failed for %s: %d\n",
+ kobject_name(&sysdev->kobj), ret);
aux_driver:
if (drv)
- printk(KERN_ERR "Class driver suspend failed for %s\n",
- kobject_name(&sysdev->kobj));
+ printk(KERN_ERR "Class driver suspend failed for %s: %d\n",
+ kobject_name(&sysdev->kobj), ret);
list_for_each_entry(err_drv, &cls->drivers, entry) {
if (err_drv == drv)
break;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..f6f37a05a0c3 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -45,7 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
}
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
+ defined(topology_book_cpumask)
static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
{
ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -114,6 +115,14 @@ define_siblings_show_func(core_cpumask);
define_one_ro_named(core_siblings, show_core_cpumask);
define_one_ro_named(core_siblings_list, show_core_cpumask_list);
+#ifdef CONFIG_SCHED_BOOK
+define_id_show_func(book_id);
+define_one_ro(book_id);
+define_siblings_show_func(book_cpumask);
+define_one_ro_named(book_siblings, show_book_cpumask);
+define_one_ro_named(book_siblings_list, show_book_cpumask_list);
+#endif
+
static struct attribute *default_attrs[] = {
&attr_physical_package_id.attr,
&attr_core_id.attr,
@@ -121,6 +130,11 @@ static struct attribute *default_attrs[] = {
&attr_thread_siblings_list.attr,
&attr_core_siblings.attr,
&attr_core_siblings_list.attr,
+#ifdef CONFIG_SCHED_BOOK
+ &attr_book_id.attr,
+ &attr_book_siblings.attr,
+ &attr_book_siblings_list.attr,
+#endif
NULL
};
@@ -162,7 +176,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
topology_remove_dev(cpu);
break;
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(rc);
}
static int __cpuinit topology_sysfs_init(void)