Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c          |  11
-rw-r--r--  drivers/base/base.h                   |   1
-rw-r--r--  drivers/base/core.c                   |  44
-rw-r--r--  drivers/base/dd.c                     |   5
-rw-r--r--  drivers/base/power/domain.c           | 194
-rw-r--r--  drivers/base/power/domain_governor.c  |  12
-rw-r--r--  drivers/base/power/sysfs.c            |   9
-rw-r--r--  drivers/base/power/trace.c            |   4
-rw-r--r--  drivers/base/property.c               |   2
-rw-r--r--  drivers/base/regmap/Kconfig           |   2
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  |  52
-rw-r--r--  drivers/base/regmap/regmap-irq.c      |  53
-rw-r--r--  drivers/base/regmap/regmap.c          | 141
13 files changed, 281 insertions(+), 249 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 4d0a0038b476..75f72d684294 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -54,6 +54,17 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+void topology_set_thermal_pressure(const struct cpumask *cpus,
+ unsigned long th_pressure)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpus)
+ WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
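
A thermal/cpufreq cooling driver is the natural producer of this new per-CPU value. A minimal caller sketch, assuming a cpufreq-based thermal cap; update_thermal_pressure(), "policy" and "capped_freq" are hypothetical names, not part of this patch:

static void update_thermal_pressure(struct cpufreq_policy *policy,
				    unsigned long capped_freq)
{
	unsigned long max_capacity, capacity;

	max_capacity = arch_scale_cpu_capacity(policy->cpu);
	/* Scale the thermally capped frequency into capacity units. */
	capacity = mult_frac(max_capacity, capped_freq,
			     policy->cpuinfo.max_freq);
	/* Publish the capacity lost to the cap for all affected CPUs. */
	topology_set_thermal_pressure(policy->related_cpus,
				      max_capacity - capacity);
}
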
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 95c22c0f9036..40fb069a8a7e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
-extern void driver_deferred_probe_force_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 22d83aedb64e..50041a8e9821 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static unsigned int defer_fw_devlink_count;
+static LIST_HEAD(deferred_fw_devlink);
static DEFINE_MUTEX(defer_fw_devlink_lock);
static bool fw_devlink_is_permissive(void);
@@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev,
*/
dev->state_synced = true;
- if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ if (WARN_ON(!list_empty(&dev->links.defer_hook)))
return;
get_device(dev);
- list_add_tail(&dev->links.defer_sync, list);
+ list_add_tail(&dev->links.defer_hook, list);
}
/**
@@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list,
{
struct device *dev, *tmp;
- list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
- list_del_init(&dev->links.defer_sync);
+ list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
+ list_del_init(&dev->links.defer_hook);
if (dev != dont_lock_dev)
device_lock(dev);
@@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void)
if (defer_sync_state_count)
goto out;
- list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
/*
* Delete from deferred_sync list before queuing it to
- * sync_list because defer_sync is used for both lists.
+ * sync_list because defer_hook is used for both lists.
*/
- list_del_init(&dev->links.defer_sync);
+ list_del_init(&dev->links.defer_hook);
__device_links_queue_sync_state(dev, &sync_list);
}
out:
@@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
- if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
- list_add_tail(&sup->links.defer_sync, &deferred_sync);
+ if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
+ list_add_tail(&sup->links.defer_hook, &deferred_sync);
}
static void device_link_drop_managed(struct device_link *link)
@@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
- list_del_init(&dev->links.defer_sync);
+ list_del_init(&dev->links.defer_hook);
__device_links_no_driver(dev);
device_links_write_unlock();
@@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev)
fw_ret = -EAGAIN;
} else {
fw_ret = -ENODEV;
+ /*
+ * defer_hook is not used to add the device to the deferred_sync list
+ * until the device is bound. Since deferred fw_devlink also blocks
+ * probing, the same list hook can be used for deferred_fw_devlink.
+ */
+ list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
}
if (fw_ret == -ENODEV)
@@ -1312,6 +1319,9 @@ void fw_devlink_pause(void)
*/
void fw_devlink_resume(void)
{
+ struct device *dev, *tmp;
+ LIST_HEAD(probe_list);
+
mutex_lock(&defer_fw_devlink_lock);
if (!defer_fw_devlink_count) {
WARN(true, "Unmatched fw_devlink pause/resume!");
@@ -1323,9 +1333,19 @@ void fw_devlink_resume(void)
goto out;
device_link_add_missing_supplier_links();
- driver_deferred_probe_force_trigger();
+ list_splice_tail_init(&deferred_fw_devlink, &probe_list);
out:
mutex_unlock(&defer_fw_devlink_lock);
+
+ /*
+ * bus_probe_device() can cause new devices to get added and they'll
+ * try to grab defer_fw_devlink_lock. So, this needs to be done outside
+ * the defer_fw_devlink_lock.
+ */
+ list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
+ list_del_init(&dev->links.defer_hook);
+ bus_probe_device(dev);
+ }
}
/* Device links support end. */
@@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.needs_suppliers);
- INIT_LIST_HEAD(&dev->links.defer_sync);
+ INIT_LIST_HEAD(&dev->links.defer_hook);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
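
The core.c hunks above hinge on one embedded list_head serving two mutually exclusive lists: a device sits on deferred_fw_devlink only until it is allowed to probe, and on deferred_sync only once it is bound. A stand-alone sketch of that pattern, with hypothetical names throughout:

struct item {
	struct list_head hook;		/* on at most one list at a time */
};

static LIST_HEAD(stage_one);
static LIST_HEAD(stage_two);

static void advance(struct item *it)
{
	/*
	 * list_del_init() leaves the node self-pointing, so later
	 * list_empty() checks (as in __device_links_supplier_defer_sync()
	 * above) stay meaningful after the move.
	 */
	list_del_init(&it->hook);
	list_add_tail(&it->hook, &stage_two);
}
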
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9a1d940342ac..48ca81cb8ebc 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;
- driver_deferred_probe_force_trigger();
-}
-
-void driver_deferred_probe_force_trigger(void)
-{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0a01df608849..2cb5e04cf86c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
- * field is protected by the master genpd->lock, which is already taken.
+ * field is protected by the parent genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
- * requirement to master domain) is different from
- * link->slave->performance_state (current performance state requirement
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (link->performance_state > state)
state = link->performance_state;
}
@@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
- struct generic_pm_domain *master;
+ struct generic_pm_domain *parent;
struct gpd_link *link;
- int master_state, ret;
+ int parent_state, ret;
if (state == genpd->performance_state)
return 0;
- /* Propagate to masters of genpd */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- master = link->master;
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- /* Find master's performance state */
+ /* Find parent's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- master->opp_table,
+ parent->opp_table,
state);
if (unlikely(ret < 0))
goto err;
- master_state = ret;
+ parent_state = ret;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
link->prev_performance_state = link->performance_state;
- link->performance_state = master_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
- genpd_unlock(master);
+ genpd_unlock(parent);
if (ret)
goto err;
@@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
err:
/* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->slave_links,
- slave_node) {
- master = link->master;
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
- master_state = link->prev_performance_state;
- link->performance_state = master_state;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
- master->name, master_state);
+ parent->name, parent_state);
}
- genpd_unlock(master);
+ genpd_unlock(parent);
}
return ret;
@@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
+ * managed to call genpd_power_on() for the parent yet after
* incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
* the genpd_power_on() restore power for us (this shouldn't
@@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd);
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return 0;
}
/**
- * genpd_power_on - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
- * Restore power to @genpd and all of its masters so that it is possible to
+ * Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
/*
* The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
+ * executed, unless one of the parents' .power_on() callbacks fiddles
* with it.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_sd_counter_inc(master);
+ genpd_sd_counter_inc(parent);
- genpd_lock_nested(master, depth + 1);
- ret = genpd_power_on(master, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
if (ret) {
- genpd_sd_counter_dec(master);
+ genpd_sd_counter_dec(parent);
goto err;
}
}
@@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
err:
list_for_each_entry_continue_reverse(link,
- &genpd->slave_links,
- slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return ret;
@@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
/**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
@@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
genpd->status = GPD_STATE_POWER_OFF;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_off(link->master, use_lock, depth + 1);
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
}
/**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
@@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
if (genpd_status_on(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_on(link->master, use_lock, depth + 1);
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
_genpd_power_on(genpd, false);
@@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
if (!genpd_is_cpu_domain(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_lock_nested(master, depth + 1);
- genpd_update_cpumask(master, cpu, set, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
}
if (set)
@@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(itr, &genpd->master_links, master_node) {
- if (itr->slave == subdomain && itr->master == genpd) {
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
ret = -EINVAL;
goto out;
}
}
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd);
@@ -1660,7 +1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
+ * @genpd: Leader PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
+ * @genpd: Leader PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
continue;
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd);
@@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
genpd_lock_init(genpd);
genpd->gov = gov;
@@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
return -EBUSY;
}
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
}
@@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s,
/*
* Modifications on the list require holding locks on both
- * master and slave, so we are safe.
+ * parent and child, so we are safe.
* Also genpd->name is immutable.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &genpd->master_links))
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
}
@@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status slaves\n");
+ seq_puts(s, "domain status children\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
@@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(link, &genpd->master_links, master_node)
- seq_printf(s, "%s\n", link->slave->name);
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
genpd_unlock(genpd);
return ret;
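
For orientation while reading the rename above: after this patch a gpd_link in include/linux/pm_domain.h looks roughly like the abridged sketch below. Each link is threaded onto the parent's parent_links list through parent_node and onto the child's child_links list through child_node:

struct gpd_link {
	struct generic_pm_domain *parent;
	struct list_head parent_node;
	struct generic_pm_domain *child;
	struct list_head child_node;
	/* The child's performance-state request towards this parent */
	unsigned int performance_state;
	unsigned int prev_performance_state;
};
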
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index daa8c7689f7e..490ed7deb99a 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*
* All subdomains have been powered off already at this point.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *sd = link->child;
s64 sd_max_off_ns = sd->max_off_time_ns;
if (sd_max_off_ns < 0)
@@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
}
/*
- * We have to invalidate the cached results for the masters, so
+ * We have to invalidate the cached results for the parents, so
* use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
+ * going to be called for any parent until this instance
* returns.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
+ list_for_each_entry(link, &genpd->child_links, child_node)
+ link->parent->max_off_time_changed = true;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 24d25cf8ab14..c7b24812523c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
+#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
@@ -739,12 +740,18 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
int wakeup_sysfs_add(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+ if (!ret)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ return ret;
}
void wakeup_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
int pm_qos_sysfs_add_resume_latency(struct device *dev)
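
The new KOBJ_CHANGE events let userspace notice when a device's wakeup attributes appear or disappear. A hedged libudev sketch of such a listener (userspace code, not part of this patch):

#include <libudev.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon =
		udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;

		poll(&pfd, 1, -1);
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		if (!strcmp(udev_device_get_action(dev), "change"))
			printf("change: %s\n",
			       udev_device_get_syspath(dev));
		udev_device_unref(dev);
	}
}
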
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 977d27bd1a22..a97f33d0c59f 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = {
.notifier_call = pm_trace_notify,
};
-static int early_resume_init(void)
+static int __init early_resume_init(void)
{
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
}
-static int late_resume_init(void)
+static int __init late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1e6d75e65938..d58aa98fe964 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
return next;
/* When no more children in primary, continue with secondary */
- if (!IS_ERR_OR_NULL(fwnode->secondary))
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
next = fwnode_get_next_child_node(fwnode->secondary, child);
return next;
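
The added fwnode check matters because a device may carry only a secondary fwnode, or none at all. A hedged sketch of the iteration this function backs:

struct fwnode_handle *child = NULL;

/* Walks the primary node's children, then falls through to secondary. */
while ((child = device_get_next_child_node(dev, child))) {
	/* inspect the child node ... */
}
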
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0fd6f97ee523..1d1d26b0d279 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C)
select IRQ_DOMAIN if REGMAP_IRQ
bool
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 089e5dc7144a..f58baff2be0a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
- ssize_t result;
- bool was_enabled, require_sync = false;
+ bool new_val, require_sync = false;
int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data, as debugfs_write_file_bool() does */
+ if (err)
+ return count;
- was_enabled = map->cache_only;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0) {
- map->unlock(map->lock_arg);
- return result;
- }
+ map->lock(map->lock_arg);
- if (map->cache_only && !was_enabled) {
+ if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_only && was_enabled) {
+ } else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
+ map->cache_only = new_val;
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
dev_err(map->dev, "Failed to sync cache %d\n", err);
}
- return result;
+ return count;
}
static const struct file_operations regmap_cache_only_fops = {
@@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
- ssize_t result;
- bool was_enabled;
+ bool new_val;
+ int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data, as debugfs_write_file_bool() does */
+ if (err)
+ return count;
- was_enabled = map->cache_bypass;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0)
- goto out;
+ map->lock(map->lock_arg);
- if (map->cache_bypass && !was_enabled) {
+ if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_bypass && was_enabled) {
+ } else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
+ map->cache_bypass = new_val;
-out:
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
- return result;
+ return count;
}
static const struct file_operations regmap_cache_bypass_fops = {
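
Both rewritten handlers follow the same ordering: parse the user buffer before taking map->lock (the copy from userspace can fault and sleep), pin the debugfs file, then publish the value under the lock. A generic sketch of that shape; my_lock and my_flag are hypothetical:

static ssize_t bool_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	bool val;
	int err;

	err = kstrtobool_from_user(ubuf, count, &val);
	if (err)
		return count;	/* swallow malformed input, as above */

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	mutex_lock(&my_lock);
	my_flag = val;
	mutex_unlock(&my_lock);

	debugfs_file_put(file->f_path.dentry);
	return count;
}
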
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4340e1d268b6..369a57e6f89d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -541,9 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
- * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
- * @np: The device_node where the IRQ domain should be added to.
+ * @fwnode: The firmware node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -557,10 +557,11 @@ static const struct irq_domain_ops regmap_domain_ops = {
* register cache. The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume.
*/
-int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
- int irq_flags, int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
int i;
@@ -771,10 +772,12 @@ int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
}
if (irq_base)
- d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base,
+ d->domain = irq_domain_add_legacy(to_of_node(fwnode),
+ chip->num_irqs, irq_base,
0, &regmap_domain_ops, d);
else
- d->domain = irq_domain_add_linear(np, chip->num_irqs,
+ d->domain = irq_domain_add_linear(to_of_node(fwnode),
+ chip->num_irqs,
&regmap_domain_ops, d);
if (!d->domain) {
dev_err(map->dev, "Failed to create IRQ domain\n");
@@ -808,7 +811,7 @@ err_alloc:
kfree(d);
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
/**
* regmap_add_irq_chip() - Use standard regmap IRQ controller handling
@@ -822,15 +825,15 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
*
* Returns 0 on success or an errno on failure.
*
- * This is the same as regmap_add_irq_chip_np, except that the device
+ * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
* node of the regmap is used.
*/
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
- return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags,
- irq_base, chip, data);
+ return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
+ irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
@@ -899,10 +902,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np()
+ * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
*
* @dev: The device pointer to which the irq_chip belongs.
- * @np: The device_node where the IRQ domain should be added to.
+ * @fwnode: The firmware node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -915,11 +918,12 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
* The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
-int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
- struct regmap *map, int irq, int irq_flags,
- int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data **ptr, *d;
int ret;
@@ -929,8 +933,8 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
if (!ptr)
return -ENOMEM;
- ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base,
- chip, &d);
+ ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
+ chip, &d);
if (ret < 0) {
devres_free(ptr);
return ret;
@@ -941,7 +945,7 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
*data = d;
return 0;
}
-EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np);
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
* devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
@@ -964,8 +968,9 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
- return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq,
- irq_flags, irq_base, chip, data);
+ return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
+ irq, irq_flags, irq_base, chip,
+ data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
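
A hedged usage sketch of the new entry point; my_irq_chip stands in for a driver's const struct regmap_irq_chip:

static int my_add_irqs(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;

	/* dev_fwnode() covers both DT- and ACPI-enumerated devices. */
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), map,
					       irq, IRQF_ONESHOT, 0,
					       &my_irq_chip, &irq_data);
}
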
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c472f624382d..e93700af7e6e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -11,12 +11,13 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val << shift);
+ put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
- __le16 *b = buf;
-
- b[0] = cpu_to_le16(val << shift);
+ put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u16 *)buf = val << shift;
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val << shift);
+ put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
- __le32 *b = buf;
-
- b[0] = cpu_to_le32(val << shift);
+ put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u32 *)buf = val << shift;
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
- __be64 *b = buf;
-
- b[0] = cpu_to_be64((u64)val << shift);
+ put_unaligned_be64((u64) val << shift, buf);
}
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
- __le64 *b = buf;
-
- b[0] = cpu_to_le64((u64)val << shift);
+ put_unaligned_le64((u64) val << shift, buf);
}
static void regmap_format_64_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u64 *)buf = (u64)val << shift;
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#endif
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
static unsigned int regmap_parse_16_be(const void *buf)
{
- const __be16 *b = buf;
-
- return be16_to_cpu(b[0]);
+ return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
- const __le16 *b = buf;
-
- return le16_to_cpu(b[0]);
+ return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
- __be16 *b = buf;
+ u16 v = get_unaligned_be16(buf);
- b[0] = be16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
- __le16 *b = buf;
+ u16 v = get_unaligned_le16(buf);
- b[0] = le16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
- return *(u16 *)buf;
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
static unsigned int regmap_parse_32_be(const void *buf)
{
- const __be32 *b = buf;
-
- return be32_to_cpu(b[0]);
+ return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
- const __le32 *b = buf;
-
- return le32_to_cpu(b[0]);
+ return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
- __be32 *b = buf;
+ u32 v = get_unaligned_be32(buf);
- b[0] = be32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
- __le32 *b = buf;
+ u32 v = get_unaligned_le32(buf);
- b[0] = le32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
- return *(u32 *)buf;
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
- const __be64 *b = buf;
-
- return be64_to_cpu(b[0]);
+ return get_unaligned_be64(buf);
}
static unsigned int regmap_parse_64_le(const void *buf)
{
- const __le64 *b = buf;
-
- return le64_to_cpu(b[0]);
+ return get_unaligned_le64(buf);
}
static void regmap_parse_64_be_inplace(void *buf)
{
- __be64 *b = buf;
+ u64 v = get_unaligned_be64(buf);
- b[0] = be64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_64_le_inplace(void *buf)
{
- __le64 *b = buf;
+ u64 v = get_unaligned_le64(buf);
- b[0] = le64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_64_native(const void *buf)
{
- return *(u64 *)buf;
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#endif
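
The point of these conversions: buf can land at any offset inside an I/O buffer, so the old b[0] loads and stores were potentially unaligned accesses, which some architectures trap on. A self-contained sketch of the safe pattern:

#include <asm/unaligned.h>

static void encode_be32(void *buf, u32 val)
{
	put_unaligned_be32(val, buf);	/* safe at any alignment */
}

static u32 decode_be32(const void *buf)
{
	return get_unaligned_be32(buf);	/* likewise */
}
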
@@ -639,7 +631,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config)
{
- struct device_node *np;
+ struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
enum regmap_endian endian;
/* Retrieve the endianness specification from the regmap config */
@@ -649,22 +641,17 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
- /* If the dev and dev->of_node exist try to get endianness from DT */
- if (dev && dev->of_node) {
- np = dev->of_node;
-
- /* Parse the device's DT node for an endianness specification */
- if (of_property_read_bool(np, "big-endian"))
- endian = REGMAP_ENDIAN_BIG;
- else if (of_property_read_bool(np, "little-endian"))
- endian = REGMAP_ENDIAN_LITTLE;
- else if (of_property_read_bool(np, "native-endian"))
- endian = REGMAP_ENDIAN_NATIVE;
-
- /* If the endianness was specified in DT, use that */
- if (endian != REGMAP_ENDIAN_DEFAULT)
- return endian;
- }
+ /* If the firmware node exists, try to get the endianness from it */
+ if (fwnode_property_read_bool(fwnode, "big-endian"))
+ endian = REGMAP_ENDIAN_BIG;
+ else if (fwnode_property_read_bool(fwnode, "little-endian"))
+ endian = REGMAP_ENDIAN_LITTLE;
+ else if (fwnode_property_read_bool(fwnode, "native-endian"))
+ endian = REGMAP_ENDIAN_NATIVE;
+
+ /* If the endianness was specified in fwnode, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
/* Retrieve the endianness specification from the bus config */
if (bus && bus->val_format_endian_default)
@@ -1357,6 +1344,7 @@ void regmap_exit(struct regmap *map)
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
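
The added kfree() plugs a leak: regmap_register_patch() stores its own copy of the caller's sequence in map->patch, which was never freed. A usage sketch with illustrative register values:

static const struct reg_sequence init_patch[] = {
	{ 0x10, 0x0001 },
	{ 0x11, 0x00ff },
};

/* The copy made by this call is what regmap_exit() now frees. */
err = regmap_register_patch(map, init_patch, ARRAY_SIZE(init_patch));
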
@@ -1371,7 +1359,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return (*r)->name == data;
+ return !strcmp((*r)->name, data);
else
return 1;
}
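
Pointer equality only held while every lookup shared the exact string passed at creation time; since names may be kstrdup_const() copies, matching by value is required. A usage sketch with a hypothetical map name:

struct regmap *map = dev_get_regmap(dev, "main");

if (!map)
	return -ENODEV;
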
@@ -2031,7 +2019,7 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
{
@@ -2944,8 +2932,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
* @reg: Register to read from
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
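
A caller sketch matching the clarified return contract; MY_STATUS_REG and MY_READY_BITS are hypothetical:

ret = regmap_test_bits(map, MY_STATUS_REG, MY_READY_BITS);
if (ret < 0)
	return ret;		/* the underlying regmap_read() failed */
if (ret == 0)
	return -EBUSY;		/* at least one tested bit is clear */
/* all tested bits are set: proceed */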