Author:    Thomas Hellström <thomas.hellstrom@linux.intel.com>  2024-04-12 15:14:25 +0200
Committer: Thomas Hellström <thomas.hellstrom@linux.intel.com>  2024-04-12 15:14:25 +0200
commit  79790b6818e96c58fe2bffee1b418c16e64e7b80 (patch)
tree    1b1f34807410ae05c17658c768f45f951e9420bc /drivers/base
parent  12f4b58a37f48a049893f1ae04b3fbb9b5088e8c (diff)
parent  6e1f415e7129f7cd4c2394af83b35cdcdd40baf7 (diff)
Merge drm/drm-next into drm-xe-next

Backmerging drm-next in order to get up to date and, in particular, to
access commit 9ca5facd0400f610f3f7f71aeb7fc0b949a48c67.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Diffstat (limited to 'drivers/base')
 -rw-r--r--  drivers/base/arch_topology.c         |  13
 -rw-r--r--  drivers/base/base.h                  |   2
 -rw-r--r--  drivers/base/cacheinfo.c             |  50
 -rw-r--r--  drivers/base/component.c             |   4
 -rw-r--r--  drivers/base/core.c                  |  98
 -rw-r--r--  drivers/base/cpu.c                   |  11
 -rw-r--r--  drivers/base/dd.c                    |  32
 -rw-r--r--  drivers/base/firmware_loader/main.c  |  16
 -rw-r--r--  drivers/base/memory.c                |  23
 -rw-r--r--  drivers/base/node.c                  |   7
 -rw-r--r--  drivers/base/platform-msi.c          | 125
 -rw-r--r--  drivers/base/power/common.c          | 134
 -rw-r--r--  drivers/base/power/main.c            | 267
 -rw-r--r--  drivers/base/power/runtime.c         |  36
 -rw-r--r--  drivers/base/power/wakeirq.c         |   4
 -rw-r--r--  drivers/base/property.c              |  67
 -rw-r--r--  drivers/base/regmap/internal.h       |   1
 -rw-r--r--  drivers/base/regmap/regcache-flat.c  |   2
 -rw-r--r--  drivers/base/regmap/regcache.c       |   4
 -rw-r--r--  drivers/base/regmap/regmap-kunit.c   | 123
 -rw-r--r--  drivers/base/regmap/regmap.c         |  10
 -rw-r--r--  drivers/base/swnode.c                |  13
 22 files changed, 730 insertions(+), 312 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 018ac202de34..024b78a0cfc1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (!raw_capacity)
- return 0;
-
if (val != CPUFREQ_CREATE_POLICY)
return 0;
@@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
}
if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- schedule_work(&update_topology_flags_work);
- free_raw_capacity();
+ if (raw_capacity) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ }
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
@@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization.
*/
- if (!acpi_disabled || !raw_capacity)
+ if (!acpi_disabled)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
diff --git a/drivers/base/base.h b/drivers/base/base.h
index eb4c0ace9242..0738ccad08b2 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -207,7 +207,7 @@ static inline int devtmpfs_init(void) { return 0; }
#endif
#ifdef CONFIG_BLOCK
-extern struct class block_class;
+extern const struct class block_class;
static inline bool is_blockdev(struct device *dev)
{
return dev->class == &block_class;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f1e79263fe61..23b8cba4a2a3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -898,6 +898,37 @@ err:
return rc;
}
+static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
+ cpumask_t **map)
+{
+ struct cacheinfo *llc, *sib_llc;
+ unsigned int sibling;
+
+ if (!last_level_cache_is_valid(cpu))
+ return 0;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+ return 0;
+
+ if (online) {
+ *map = &llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ /* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
+ for_each_cpu(sibling, &llc->shared_cpu_map) {
+ if (sibling == cpu || !last_level_cache_is_valid(sibling))
+ continue;
+ sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
+ *map = &sib_llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ return 0;
+}
+
/*
* Calculate the size of the per-CPU data cache slice. This can be
* used to estimate the size of the data cache slice that can be used
@@ -929,28 +960,31 @@ static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
ci->per_cpu_data_slice_size = llc->size / nr_shared;
}
-static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu)
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
+ cpumask_t *cpu_map)
{
unsigned int icpu;
- for_each_online_cpu(icpu) {
+ for_each_cpu(icpu, cpu_map) {
if (!cpu_online && icpu == cpu)
continue;
update_per_cpu_data_slice_size_cpu(icpu);
+ setup_pcp_cacheinfo(icpu);
}
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
+ cpumask_t *cpu_map;
if (rc)
return rc;
rc = cache_add_dev(cpu);
if (rc)
goto err;
- update_per_cpu_data_slice_size(true, cpu);
- setup_pcp_cacheinfo();
+ if (cpu_map_shared_cache(true, cpu, &cpu_map))
+ update_per_cpu_data_slice_size(true, cpu, cpu_map);
return 0;
err:
free_cache_attributes(cpu);
@@ -959,12 +993,16 @@ err:
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
+ cpumask_t *cpu_map;
+ unsigned int nr_shared;
+
+ nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
- update_per_cpu_data_slice_size(false, cpu);
- setup_pcp_cacheinfo();
+ if (nr_shared > 1)
+ update_per_cpu_data_slice_size(false, cpu, cpu_map);
return 0;
}
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7dbf14a1d915..741497324d78 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -751,7 +751,7 @@ static int __component_add(struct device *dev, const struct component_ops *ops,
* component_bind_all(). See also &struct component_ops.
*
* @subcomponent must be nonzero and is used to differentiate between multiple
- * components registerd on the same device @dev. These components are match
+ * components registered on the same device @dev. These components are match
* using component_match_add_typed().
*
* The component needs to be unregistered at driver unload/disconnect by
@@ -781,7 +781,7 @@ EXPORT_SYMBOL_GPL(component_add_typed);
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
- * See also component_add_typed() for a variant that allows multipled different
+ * See also component_add_typed() for a variant that allows multiple different
* components on the same device.
*/
int component_add(struct device *dev, const struct component_ops *ops)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d46af40f9a..b93f3c5716ae 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -92,12 +92,13 @@ static int __fwnode_link_add(struct fwnode_handle *con,
return 0;
}
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags)
{
int ret;
mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, 0);
+ ret = __fwnode_link_add(con, sup, flags);
mutex_unlock(&fwnode_link_lock);
return ret;
}
@@ -125,7 +126,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
- pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
@@ -284,10 +285,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE | \
+ DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
@@ -1009,7 +1012,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers(
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
- if (!(link->flags & FWLINK_FLAG_CYCLE))
+ if (!(link->flags &
+ (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
return link->supplier;
return NULL;
@@ -1869,6 +1873,7 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
@@ -1900,6 +1905,63 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
}
/**
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware which is tested for being an ancestor
+ * @child: Firmware which is tested for being the child
+ *
+ * A node is considered an ancestor of itself too.
+ *
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ */
+static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
+ const struct fwnode_handle *child)
+{
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
+ return false;
+
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
+ * @fwnode: firmware node
+ *
+ * Given a firmware node (@fwnode), this function finds its closest ancestor
+ * firmware node that has a corresponding struct device and returns that struct
+ * device.
+ *
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
+ */
+static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ struct device *dev;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
* @con: Potential consumer device.
* @sup_handle: Potential supplier's fwnode.
@@ -1943,6 +2005,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */
if (sup_dev == con) {
+ pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
@@ -1959,6 +2022,9 @@ static bool __fw_devlink_relax_cycles(struct device *con,
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ continue;
+
if (__fw_devlink_relax_cycles(con, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
@@ -1974,8 +2040,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ par_dev->fwnode);
ret = true;
+ }
if (!sup_dev)
goto out;
@@ -1991,6 +2060,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
+ pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
@@ -2032,6 +2103,9 @@ static int fw_devlink_create_devlink(struct device *con,
int ret = 0;
u32 flags;
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ return 0;
+
if (con->fwnode == link->consumer)
flags = fw_devlink_get_flags(link->flags);
else
@@ -2058,13 +2132,19 @@ static int fw_devlink_create_devlink(struct device *con,
/*
* SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So cycle detection isn't necessary and shouldn't be done.
+ * So, one might expect that cycle detection isn't necessary for them.
+ * However, if the device link was marked as SYNC_STATE_ONLY because
+ * it's part of a cycle, then we still need to do cycle detection. This
+ * is because the consumer and supplier might be part of multiple cycles
+ * and we need to detect all those cycles.
*/
- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ if (!device_link_flag_is_sync_state_only(flags) ||
+ flags & DL_FLAG_CYCLE) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
+ pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
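
The new u8 flags argument to fwnode_link_add() lets firmware-bus code mark a
link at creation time instead of patching it up afterwards. A minimal sketch
of a caller, assuming only FWLINK_FLAG_IGNORE from this diff; the helper and
its context are hypothetical:

#include <linux/fwnode.h>

/*
 * Hypothetical caller: record a consumer -> supplier dependency, and
 * mark it FWLINK_FLAG_IGNORE when it must never block probing or take
 * part in cycle detection (both sites above skip such links).
 */
static int example_record_dependency(struct fwnode_handle *con,
                                     struct fwnode_handle *sup,
                                     bool ignore_for_probe)
{
        return fwnode_link_add(con, sup,
                               ignore_for_probe ? FWLINK_FLAG_IGNORE : 0);
}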
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 47de0f140ba6..56fba44ba391 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
@@ -366,7 +366,7 @@ static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
}
#endif
-struct bus_type cpu_subsys = {
+const struct bus_type cpu_subsys = {
.name = "cpu",
.dev_name = "cpu",
.match = cpu_subsys_match,
@@ -588,6 +588,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -602,6 +603,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -617,6 +619,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
NULL
};
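
CPU_SHOW_VULN_FALLBACK() emits a weak stub reporting "Not affected", so the
new sysfs file exists on every architecture; an affected architecture
overrides the symbol. A sketch of such an override, with a made-up
mitigation string:

#include <linux/cpu.h>
#include <linux/device.h>

/* Sketch: strong definition overriding the weak fallback above. */
ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        /* Placeholder string, not taken from any real architecture. */
        return sysfs_emit(buf, "Mitigation: example sequence\n");
}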
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 85152537dbf1..83d352394fdf 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
- dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
+ dev_warn(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
@@ -397,13 +397,12 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- pr_warn("%s: device %s already bound\n",
- __func__, kobject_name(&dev->kobj));
+ dev_warn(dev, "%s: device already bound\n", __func__);
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
- __func__, dev_name(dev));
+ dev_dbg(dev, "driver: '%s': %s: bound to device\n", dev->driver->name,
+ __func__);
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
@@ -587,13 +586,13 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
break;
case -ENODEV:
case -ENXIO:
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
+ dev_dbg(dev, "probe with driver %s rejects match %d\n",
+ drv->name, ret);
break;
default:
/* driver matched but the probe failed */
- pr_warn("%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ dev_err(dev, "probe with driver %s failed with error %d\n",
+ drv->name, ret);
break;
}
@@ -620,8 +619,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (link_ret == -EPROBE_DEFER)
return link_ret;
- pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev_name(dev));
+ dev_dbg(dev, "bus: '%s': %s: probing driver %s with device\n",
+ drv->bus->name, __func__, drv->name);
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
ret = -EBUSY;
@@ -644,8 +643,7 @@ re_probe:
ret = driver_sysfs_add(dev);
if (ret) {
- pr_err("%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ dev_err(dev, "%s: driver_sysfs_add failed\n", __func__);
goto sysfs_failed;
}
@@ -706,8 +704,8 @@ re_probe:
dev->pm_domain->sync(dev);
driver_bound(dev);
- pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: bound device to driver %s\n",
+ drv->bus->name, __func__, drv->name);
goto done;
dev_sysfs_state_synced_failed:
@@ -786,8 +784,8 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
return -EBUSY;
dev->can_match = true;
- pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
+ drv->bus->name, __func__, drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index ea28102d421e..da8ca01d011c 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -551,12 +551,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
file_size_ptr,
READING_FIRMWARE);
if (rc < 0) {
- if (rc != -ENOENT)
- dev_warn(device, "loading %s failed with error %d\n",
- path, rc);
- else
- dev_dbg(device, "loading %s failed for no such file or directory.\n",
- path);
+ if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) {
+ if (rc != -ENOENT)
+ dev_warn(device,
+ "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_dbg(device,
+ "loading %s failed for no such file or directory.\n",
+ path);
+ }
continue;
}
size = rc;
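
FW_OPT_NO_WARN is set when a driver requests firmware through the _nowarn
variant, so an absent optional blob no longer warns. A sketch of such a
caller; the firmware name is made up:

#include <linux/firmware.h>

static void example_load_optional_fw(struct device *dev)
{
        const struct firmware *fw;

        /* Sets FW_OPT_NO_WARN internally: absence now stays quiet. */
        if (firmware_request_nowarn(&fw, "example/optional.bin", dev))
                return; /* run without the optional blob */

        /* ... consume fw->data / fw->size ... */
        release_firmware(fw);
}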
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f964a7719b..c0436f46cfb7 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -188,6 +188,7 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
struct zone *zone;
int ret;
@@ -207,9 +208,19 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
+ ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ goto out_notifier;
+
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
+ zone, mem->altmap->inaccessible);
if (ret)
goto out;
}
@@ -231,7 +242,11 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
+ mem_hotplug_done();
+ return ret;
out:
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
+out_notifier:
mem_hotplug_done();
return ret;
}
@@ -244,6 +259,7 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -275,6 +291,11 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
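
The two new notifications bracket the altmap range: MEM_PREPARE_ONLINE fires
before the memmap-on-memory pages are initialized, MEM_FINISH_OFFLINE after
they are torn down. A sketch of a subscriber; the handler bodies are
illustrative only:

#include <linux/memory.h>
#include <linux/notifier.h>

static int example_memory_event(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct memory_notify *arg = data;

        switch (action) {
        case MEM_PREPARE_ONLINE:
                /* make arg->altmap_start_pfn .. +altmap_nr_pages usable */
                break;
        case MEM_FINISH_OFFLINE:
                /* release that range again */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_memory_nb = {
        .notifier_call = example_memory_event,
};

/* Registered once at init: register_memory_notifier(&example_memory_nb); */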
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1c05640461dd..eb72580288e6 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
}
static struct node_access_nodes *node_init_node_access(struct node *node,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *access_node;
struct device *dev;
@@ -191,7 +191,7 @@ static struct attribute *access_attrs[] = {
* @access: The access class the for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *c;
struct node *node;
@@ -215,6 +215,7 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
}
}
}
+EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
* struct node_cache_info - Internal tracking for memory node caches
@@ -689,7 +690,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f37ad34c80ec..11f5fdf65b9e 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -13,6 +13,8 @@
#include <linux/msi.h>
#include <linux/slab.h>
+/* Begin of removal area. Once everything is converted over. Cleanup the includes too! */
+
#define DEV_ID_SHIFT 21
#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
@@ -172,8 +174,8 @@ static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
if (!datap)
return -ENOMEM;
- datap->devid = ida_simple_get(&platform_msi_devid_ida,
- 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ datap->devid = ida_alloc_max(&platform_msi_devid_ida,
+ (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
if (datap->devid < 0) {
err = datap->devid;
kfree(datap);
@@ -191,7 +193,7 @@ static void platform_msi_free_priv_data(struct device *dev)
struct platform_msi_priv_data *data = dev->msi.data->platform_data;
dev->msi.data->platform_data = NULL;
- ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ ida_free(&platform_msi_devid_ida, data->devid);
kfree(data);
}
@@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev)
* Returns:
* Zero for success, or an error code in case of failure
*/
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
int err;
@@ -219,18 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
return err;
}
-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
-
-/**
- * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
- * @dev: The device for which to free interrupts
- */
-void platform_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
- platform_msi_free_priv_data(dev);
-}
-EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
@@ -350,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir
return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
+
+/* End of removal area */
+
+/* Real per device domain interfaces */
+
+/*
+ * This indirection can go when platform_device_msi_init_and_alloc_irqs()
+ * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it
+ * simple for now.
+ */
+static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ irq_write_msi_msg_t cb = d->chip_data;
+
+ cb(irq_data_get_msi_desc(d), msg);
+}
+
+static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = desc->msi_index;
+}
+
+static const struct msi_domain_template platform_msi_template = {
+ .chip = {
+ .name = "pMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_write_msi_msg = platform_msi_write_msi_msg,
+ /* The rest is filled in by the platform MSI parent */
+ },
+
+ .ops = {
+ .set_desc = platform_msi_set_desc_byindex,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_DEVICE_MSI,
+ },
+};
+
+/**
+ * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI
+ * and allocate interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ *
+ * This creates a MSI domain on @dev which has @dev->msi.domain as
+ * parent. The parent domain sets up the new domain. The domain has
+ * a fixed size of @nvec. The domain is managed by devres and will
+ * be removed when the device is removed.
+ *
+ * Note: For migration purposes this falls back to the original platform_msi code
+ * up to the point where all platforms have been converted to the MSI
+ * parent model.
+ */
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ if (!domain || !write_msi_msg)
+ return -EINVAL;
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
+
+ /*
+ * @write_msi_msg is stored in the resulting msi_domain_info::data.
+ * The underlying domain creation mechanism will assign that
+ * callback to the resulting irq chip.
+ */
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &platform_msi_template,
+ nvec, NULL, write_msi_msg))
+ return -ENODEV;
+
+ return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
+
+/**
+ * platform_device_msi_free_irqs_all - Free all interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_device_msi_free_irqs_all(struct device *dev)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ platform_msi_free_priv_data(dev);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
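
A sketch of a driver using the two new per-device interfaces; the
write_msi_msg callback body and the probe/remove context are hypothetical:

#include <linux/device.h>
#include <linux/msi.h>

static void example_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        /* program the device's per-vector address/data registers */
}

static int example_probe(struct device *dev)
{
        int ret;

        ret = platform_device_msi_init_and_alloc_irqs(dev, 4,
                                                      example_write_msg);
        if (ret)
                return ret;

        /* request handlers via msi_get_virq(dev, 0..3) ... */
        return 0;
}

static void example_remove(struct device *dev)
{
        platform_device_msi_free_irqs_all(dev);
}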
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 44ec20918a4d..327d168dd37a 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -168,6 +168,115 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
/**
+ * dev_pm_domain_attach_list - Associate a device with its PM domains.
+ * @dev: The device used to lookup the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * This function helps to attach a device to its multiple PM domains. The
+ * caller, which is typically a driver's probe function, may provide a list of
+ * names for the PM domains that we should try to attach the device to, but it
+ * may also provide an empty list, in case the attach should be done for all of
+ * the available PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure. Note that, to detach the list of PM domains, the driver shall call
+ * dev_pm_domain_detach_list(), typically during the remove phase.
+ */
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ struct device_node *np = dev->of_node;
+ struct dev_pm_domain_list *pds;
+ struct device *pd_dev = NULL;
+ int ret, i, num_pds = 0;
+ bool by_id = true;
+ u32 pd_flags = data ? data->pd_flags : 0;
+ u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ /* For now this is limited to OF based platforms. */
+ if (!np)
+ return 0;
+
+ if (data && data->pd_names) {
+ num_pds = data->num_pd_names;
+ by_id = false;
+ } else {
+ num_pds = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ }
+
+ if (num_pds <= 0)
+ return 0;
+
+ pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
+ GFP_KERNEL);
+ if (!pds->pd_devs)
+ return -ENOMEM;
+
+ pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
+ GFP_KERNEL);
+ if (!pds->pd_links)
+ return -ENOMEM;
+
+ if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
+ link_flags |= DL_FLAG_RPM_ACTIVE;
+
+ for (i = 0; i < num_pds; i++) {
+ if (by_id)
+ pd_dev = dev_pm_domain_attach_by_id(dev, i);
+ else
+ pd_dev = dev_pm_domain_attach_by_name(dev,
+ data->pd_names[i]);
+ if (IS_ERR_OR_NULL(pd_dev)) {
+ ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ goto err_attach;
+ }
+
+ if (link_flags) {
+ struct device_link *link;
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ ret = -ENODEV;
+ goto err_link;
+ }
+
+ pds->pd_links[i] = link;
+ }
+
+ pds->pd_devs[i] = pd_dev;
+ }
+
+ pds->num_pds = num_pds;
+ *list = pds;
+ return num_pds;
+
+err_link:
+ dev_pm_domain_detach(pd_dev, true);
+err_attach:
+ while (--i >= 0) {
+ if (pds->pd_links[i])
+ device_link_del(pds->pd_links[i]);
+ dev_pm_domain_detach(pds->pd_devs[i], true);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
@@ -188,6 +297,31 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_detach_list - Detach a list of PM domains.
+ * @list: The list of PM domains to detach.
+ *
+ * This function reverse the actions from dev_pm_domain_attach_list().
+ * Typically it should be invoked during the remove phase from drivers.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
+{
+ int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_pds; i++) {
+ if (list->pd_links[i])
+ device_link_del(list->pd_links[i]);
+ dev_pm_domain_detach(list->pd_devs[i], true);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
+
+/**
* dev_pm_domain_start - Start the device through its PM domain.
* @dev: Device to start.
*
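
A sketch of a consumer of the new list helpers, attaching to two named
domains at probe time; the domain names and flag choice are made up:

#include <linux/kernel.h>
#include <linux/pm_domain.h>

static const char * const example_pd_names[] = { "perf", "mem" };

static int example_attach_domains(struct device *dev,
                                  struct dev_pm_domain_list **pds)
{
        struct dev_pm_domain_attach_data data = {
                .pd_names = example_pd_names,
                .num_pd_names = ARRAY_SIZE(example_pd_names),
                .pd_flags = PD_FLAG_DEV_LINK_ON,
        };
        int ret;

        ret = dev_pm_domain_attach_list(dev, &data, pds);
        return ret < 0 ? ret : 0;
}

/* The remove path calls dev_pm_domain_detach_list() on the saved list. */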
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fadcd0379dc2..5679f966f676 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -578,6 +577,35 @@ bool dev_pm_skip_resume(struct device *dev)
return !dev->power.must_resume;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ dev->power.async_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+ }
+ /*
+ * Because async_schedule_dev_nocall() above has returned false or it
+ * has not been called at all, func() is not running and it is safe to
+ * update the async_in_progress flag without extra synchronization.
+ */
+ dev->power.async_in_progress = false;
+ return false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -657,42 +685,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
-static bool dpm_async_fn(struct device *dev, async_func_t func)
-{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
-
- get_device(dev);
-
- if (async_schedule_dev_nocall(func, dev))
- return true;
-
- put_device(dev);
- }
- /*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
- */
- dev->power.async_in_progress = false;
- return false;
-}
-
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
@@ -707,9 +705,12 @@ static void dpm_noirq_resume_devices(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -736,6 +737,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -817,8 +821,7 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
@@ -842,9 +845,12 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -871,6 +877,9 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -974,8 +983,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1004,10 +1012,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -1017,29 +1026,25 @@ void dpm_resume(pm_message_t state)
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
-
- get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dev->power.async_in_progress) {
+ get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
+ put_device(dev);
+
mutex_lock(&dpm_list_mtx);
}
-
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
-
- mutex_unlock(&dpm_list_mtx);
-
- put_device(dev);
-
- mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1187,7 +1192,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
}
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1195,7 +1200,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1240,6 +1245,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
@@ -1269,54 +1276,37 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_noirq(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_noirq))
- return 0;
-
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_noirq(dev);
+ list_move(&dev->power.entry, &dpm_noirq_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_noirq_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_noirq(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1324,15 +1314,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1375,14 +1366,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
}
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1434,6 +1425,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1450,24 +1443,11 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_late(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_late))
- return 0;
-
- return __device_suspend_late(dev, pm_transition, false);
-}
-
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -1478,32 +1458,28 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- wake_up_all_idle_cpus();
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
-
- get_device(dev);
+ wake_up_all_idle_cpus();
- mutex_unlock(&dpm_list_mtx);
+ mutex_lock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
- mutex_lock(&dpm_list_mtx);
+ list_move(&dev->power.entry, &dpm_late_early_list);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_late(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1511,12 +1487,14 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
+
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1597,12 +1575,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
}
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1716,8 +1694,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
+ if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
@@ -1727,25 +1708,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend))
- return 0;
-
- return __device_suspend(dev, pm_transition, false);
-}
-
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1761,29 +1728,25 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
- get_device(dev);
+ mutex_lock(&dpm_list_mtx);
- mutex_unlock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
- error = device_suspend(dev);
+ list_move(&dev->power.entry, &dpm_suspended_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_suspended_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1791,14 +1754,16 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
@@ -1949,11 +1914,11 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else
error = dpm_suspend(state);
+
dpm_show_time(starttime, state, error, "start");
return error;
}
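
The restructured loops above only fan work out for devices that opted in to
asynchronous suspend/resume; everything else is still handled synchronously
in list order. The opt-in itself is unchanged, a minimal reminder:

#include <linux/device.h>

/* Typically called from a driver's probe path. */
static void example_opt_in_async(struct device *dev)
{
        device_enable_async_suspend(dev);
}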
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 05793c9fbb84..2ee45841486b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -94,6 +94,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
@@ -1176,7 +1177,7 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
@@ -1197,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
@@ -1218,9 +1219,40 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
+ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
+ * device, in which case also the usage_count will remain unmodified.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
+ * it returns 1. If the device is in a different state or its usage_count is 0,
+ * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
+ * in which case also the usage_count will remain unmodified.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
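
The bool parameter is gone from the public API: pm_runtime_get_if_active()
and pm_runtime_get_if_in_use() now wrap the static helper. A sketch of the
usual calling pattern:

#include <linux/pm_runtime.h>

static int example_touch_hw_if_active(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_if_active(dev);
        if (ret <= 0)
                return ret; /* 0: suspended, -EINVAL: runtime PM disabled */

        /* ... safe to touch the hardware here ... */

        pm_runtime_put(dev);
        return 0;
}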
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 42171f766dcb..5a5a9e978e85 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index a1b01ab42052..7324a704a9a1 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -7,15 +7,16 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/kconfig.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
@@ -700,34 +701,6 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
- * @fwnode: firmware node
- *
- * Given a firmware node (@fwnode), this function finds its closest ancestor
- * firmware node that has a corresponding struct device and returns that struct
- * device.
- *
- * The caller is responsible for calling put_device() on the returned device
- * pointer.
- *
- * Return: a pointer to the device of the @fwnode's closest ancestor.
- */
-struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *parent;
- struct device *dev;
-
- fwnode_for_each_parent_node(fwnode, parent) {
- dev = get_dev_from_fwnode(parent);
- if (dev) {
- fwnode_handle_put(parent);
- return dev;
- }
- }
- return NULL;
-}
-
-/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
@@ -774,34 +747,6 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
- * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
- * @ancestor: Firmware which is tested for being an ancestor
- * @child: Firmware which is tested for being the child
- *
- * A node is considered an ancestor of itself too.
- *
- * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
- */
-bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
-{
- struct fwnode_handle *parent;
-
- if (IS_ERR_OR_NULL(ancestor))
- return false;
-
- if (child == ancestor)
- return true;
-
- fwnode_for_each_parent_node(child, parent) {
- if (parent == ancestor) {
- fwnode_handle_put(parent);
- return true;
- }
- }
- return false;
-}
-
-/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 583dd5d7d46b..bcdb25bec77c 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -93,6 +93,7 @@ struct regmap {
#endif
unsigned int max_register;
+ bool max_register_is_set;
bool (*writeable_reg)(struct device *dev, unsigned int reg);
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index b7e4b2464102..9b17c77dec9d 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -23,7 +23,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0 || !map->max_register)
+ if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index ac63a73ccdaa..2e41cb12b8e2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -187,8 +187,10 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return 0;
}
- if (!map->max_register && map->num_reg_defaults_raw)
+ if (!map->max_register_is_set && map->num_reg_defaults_raw) {
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
+ map->max_register_is_set = true;
+ }
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 026bdcb45127..bb2ab6129f38 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
+static void get_changed_bytes(void *orig, void *new, size_t size)
+{
+ char *o = orig;
+ char *n = new;
+ int i;
+
+ get_random_bytes(new, size);
+
+ /*
+ * This could be nicer and more efficient but we shouldn't
+ * super care.
+ */
+ for (i = 0; i < size; i++)
+ while (n[i] == o[i])
+ get_random_bytes(&n[i], 1);
+}
+
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- unsigned int val, val_test, val_last;
+ unsigned int val;
+ u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config;
@@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- u16 val[2];
+ u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
- get_random_bytes(&val, sizeof(val));
+ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- be16_to_cpu(val[0])));
- else
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- le16_to_cpu(val[0])));
+ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
+ sizeof(u16) * 2));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
- case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu(val[i - 2]));
}
break;
+ case 4:
+ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
+ break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
+
+ /*
+ * The value written via _write() was translated by the core,
+ * translate the original copy for comparison purposes.
+ */
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+ val[2] = cpu_to_be16(val[2]);
+ else
+ val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@@ -1312,8 +1336,72 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
+
+ regmap_exit(map);
+}
+
+static void raw_ranges(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = raw_regmap_config;
+ config.volatile_reg = test_range_all_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Reset the page to a non-zero value to trigger a change */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+ test_range.range_max));
+
+ /* Check we set the page and use the window for writes */
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+ test_range.range_min +
+ test_range.window_len,
+ 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ /* Same for reads */
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+ test_range.range_min +
+ test_range.window_len,
+ &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ /* No physical access triggered in the virtual range */
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ }
regmap_exit(map);
}
@@ -1345,6 +1433,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
+ KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
{}
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6db77d8e45f9..5cb425f6f02d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->writeable_reg)
@@ -112,7 +112,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
if (!map->cache_ops)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
map->lock(map->lock_arg);
@@ -129,7 +129,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (!map->reg_read)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->format.format_write)
@@ -787,6 +787,7 @@ struct regmap *__regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->wr_table = config->wr_table;
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
@@ -1412,6 +1413,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
regmap_debugfs_exit(map);
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -3383,7 +3385,7 @@ EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
*/
int regmap_get_max_register(struct regmap *map)
{
- return map->max_register ? map->max_register : -EINVAL;
+ return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
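
The new max_register_is_set flag makes a zero max_register meaningful, so a
device whose only register sits at address 0 can now be described. A sketch
of such a config, using the max_register_is_0 field consumed above:

#include <linux/regmap.h>

static const struct regmap_config example_single_reg_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = 0,
        .max_register_is_0 = true, /* register 0 really is the last one */
};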
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 36512fb75a20..eb6eb25b343b 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -6,10 +6,21 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include "base.h"