author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2025-03-24 14:19:50 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2025-03-24 14:19:50 +0100
commit     1774be7cfc2fc3a606aa8e9b855427877104804b (patch)
tree       2c9a95190c1bfda3f604967607b098d2ab3e932b
parent     80f0f07946ae304c0fcf9cd1452df6312b6f35e4 (diff)
parent     7a6589f1aa9b6721b6ad01b6f71754570ad76d27 (diff)
Merge branch 'pm-cpufreq'
Merge cpufreq updates for 6.15-rc1:

 - Manage sysfs attributes and boost frequencies efficiently from
   cpufreq core to reduce boilerplate code from drivers (Viresh Kumar).

 - Minor cleanups to cpufreq drivers (Aaron Kling, Benjamin Schneider,
   Dhananjay Ugwekar, Imran Shaik, and zuoqian).

 - Migrate some cpufreq drivers to using for_each_present_cpu() (Jacky
   Bai).

 - cpufreq-qcom-hw DT binding fixes (Krzysztof Kozlowski).

 - Use str_enable_disable() helper in cpufreq_online() (Lifeng Zheng).

 - Optimize the amd-pstate driver to avoid cases where call paths end
   up calling the same writes multiple times and needlessly caching
   variables through code reorganization, locking overhaul and tracing
   adjustments (Mario Limonciello, Dhananjay Ugwekar).

 - Make it possible to avoid enabling capacity-aware scheduling (CAS)
   in the intel_pstate driver and relocate a check for out-of-band (OOB)
   platform handling in it to make it detect OOB before checking HWP
   availability (Rafael Wysocki).

 - Fix dbs_update() to avoid inadvertent conversions of negative integer
   values to unsigned int which causes CPU frequency selection to be
   inaccurate in some cases when the "conservative" cpufreq governor is
   in use (Jie Zhan).

* pm-cpufreq: (91 commits)
  dt-bindings: cpufreq: cpufreq-qcom-hw: Narrow properties on SDX75, SA8775p and SM8650
  dt-bindings: cpufreq: cpufreq-qcom-hw: Drop redundant minItems:1
  dt-bindings: cpufreq: cpufreq-qcom-hw: Add missing constraint for interrupt-names
  dt-bindings: cpufreq: cpufreq-qcom-hw: Add QCS8300 compatible
  cpufreq: Init cpufreq only for present CPUs
  cpufreq: tegra186: Share policy per cluster
  cpufreq/amd-pstate: Drop actions in amd_pstate_epp_cpu_offline()
  cpufreq/amd-pstate: Stop caching EPP
  cpufreq/amd-pstate: Rework CPPC enabling
  cpufreq/amd-pstate: Drop debug statements for policy setting
  cpufreq/amd-pstate: Update cppc_req_cached for shared mem EPP writes
  cpufreq/amd-pstate: Move all EPP tracing into *_update_perf and *_set_epp functions
  cpufreq/amd-pstate: Cache CPPC request in shared mem case too
  cpufreq/amd-pstate: Replace all AMD_CPPC_* macros with masks
  cpufreq/amd-pstate-ut: Adjust variable scope
  cpufreq/amd-pstate-ut: Run on all of the correct CPUs
  cpufreq/amd-pstate-ut: Drop SUCCESS and FAIL enums
  cpufreq/amd-pstate-ut: Allow lowest nonlinear and lowest to be the same
  cpufreq/amd-pstate-ut: Use _free macro to free put policy
  cpufreq/amd-pstate: Drop `cppc_cap1_cached`
  ...
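
An editorial aside on the dbs_update() item above, since the bug class is easy to miss in review: the sketch below is not the governor code (the variable names are made up); it only demonstrates how a negative int silently wraps when assigned to an unsigned int, which is the kind of conversion the fix avoids.

  #include <stdio.h>

  int main(void)
  {
          int idle_delta = -5;      /* hypothetical small negative correction */
          unsigned int load;

          /*
           * Assigning a negative int to an unsigned int wraps modulo 2^32,
           * so a load figure derived from it jumps to a near-maximum value
           * and skews the frequency selection upward.
           */
          load = idle_delta;
          printf("load = %u\n", load);      /* prints 4294967291 */

          return 0;
  }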
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 3
-rw-r--r--  Documentation/admin-guide/pm/intel_pstate.rst | 3
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml | 35
-rw-r--r--  arch/x86/include/asm/msr-index.h | 20
-rw-r--r--  arch/x86/kernel/acpi/cppc.c | 4
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/amd-pstate-trace.h | 57
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c | 211
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 670
-rw-r--r--  drivers/cpufreq/amd-pstate.h | 65
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/bmips-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 24
-rw-r--r--  drivers/cpufreq/cpufreq.c | 46
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 45
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 1
-rw-r--r--  drivers/cpufreq/elanfreq.c | 1
-rw-r--r--  drivers/cpufreq/freq_table.c | 15
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 27
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/longhaul.c | 1
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/loongson3_cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c | 3
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 1
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 1
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 1
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 1
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 16
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 8
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 1
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 1
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 1
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 1
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c | 6
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/virtual-cpufreq.c | 3
-rw-r--r--  include/linux/cpufreq.h | 24
59 files changed, 617 insertions, 799 deletions
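
One more aside before the patch body, on the amd-pstate rework summarized in the commit message: the driver now converts between frequencies and CPPC performance levels through the nominal_freq/nominal_perf ratio (see the freq_to_perf()/perf_to_freq() helpers added to drivers/cpufreq/amd-pstate.c below). The userspace sketch below re-does that arithmetic with made-up numbers to show the rounding behavior; it is not driver code.

  #include <stdint.h>
  #include <stdio.h>

  #define DIV_ROUND_UP_ULL(x, y) (((x) + (y) - 1) / (y))

  int main(void)
  {
          uint32_t nominal_freq = 2800000;  /* kHz, hypothetical */
          uint8_t  nominal_perf = 120;      /* hypothetical CPPC nominal level */
          uint8_t  highest_perf = 166;

          /* perf_to_freq(): freq = nominal_freq * perf / nominal_perf */
          uint32_t max_freq = DIV_ROUND_UP_ULL((uint64_t)nominal_freq * highest_perf,
                                               nominal_perf);
          printf("max_freq = %u kHz\n", max_freq);          /* 3873334 */

          /* freq_to_perf(): perf = freq * nominal_perf / nominal_freq */
          uint32_t perf = DIV_ROUND_UP_ULL((uint64_t)max_freq * nominal_perf,
                                           nominal_freq);
          printf("round-trip perf = %u\n", perf);           /* 167 */

          /*
           * Rounding up in both directions overshoots by one here; the real
           * freq_to_perf() additionally clamps to [lowest_perf, highest_perf],
           * which absorbs that.
           */
          return 0;
  }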
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb8752b42ec8..77e671e55993 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2316,6 +2316,9 @@
per_cpu_perf_limits
Allow per-logical-CPU P-State performance control limits using
cpufreq sysfs interface
+ no_cas
+ Do not enable capacity-aware scheduling (CAS) on
+ hybrid systems
intremap= [X86-64,Intel-IOMMU,EARLY]
on enable Interrupt Remapping (default)
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index bf13ad25a32f..78fc83ed2a7e 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -696,6 +696,9 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
Use per-logical-CPU P-State limits (see `Coordination of P-state
Limits`_ for details).
+``no_cas``
+ Do not enable capacity-aware scheduling (CAS) which is enabled by
+ default on hybrid systems.
Diagnostics and Tuning
======================
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index e937eb7355e7..e0242bed3342 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -34,6 +34,7 @@ properties:
- description: v2 of CPUFREQ HW (EPSS)
items:
- enum:
+ - qcom,qcs8300-cpufreq-epss
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
- qcom,sa8775p-cpufreq-epss
@@ -111,22 +112,20 @@ allOf:
enum:
- qcom,qcm2290-cpufreq-hw
- qcom,sar2130p-cpufreq-epss
+ - qcom,sdx75-cpufreq-epss
then:
properties:
reg:
- minItems: 1
maxItems: 1
reg-names:
- minItems: 1
maxItems: 1
interrupts:
- minItems: 1
maxItems: 1
interrupt-names:
- minItems: 1
+ maxItems: 1
- if:
properties:
@@ -135,6 +134,7 @@ allOf:
enum:
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
+ - qcom,sa8775p-cpufreq-epss
- qcom,sc7180-cpufreq-hw
- qcom,sc8180x-cpufreq-hw
- qcom,sc8280xp-cpufreq-epss
@@ -160,12 +160,14 @@ allOf:
interrupt-names:
minItems: 2
+ maxItems: 2
- if:
properties:
compatible:
contains:
enum:
+ - qcom,qcs8300-cpufreq-epss
- qcom,sc7280-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- qcom,sm8350-cpufreq-epss
@@ -187,6 +189,7 @@ allOf:
interrupt-names:
minItems: 3
+ maxItems: 3
- if:
properties:
@@ -211,7 +214,31 @@ allOf:
interrupt-names:
minItems: 2
+ maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8650-cpufreq-epss
+ then:
+ properties:
+ reg:
+ minItems: 4
+ maxItems: 4
+
+ reg-names:
+ minItems: 4
+ maxItems: 4
+
+ interrupts:
+ minItems: 4
+ maxItems: 4
+
+ interrupt-names:
+ minItems: 4
+ maxItems: 4
examples:
- |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 72765b2fe0d8..fc2634cc48fd 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -701,15 +701,17 @@
#define MSR_AMD_CPPC_REQ 0xc00102b3
#define MSR_AMD_CPPC_STATUS 0xc00102b4
-#define AMD_CPPC_LOWEST_PERF(x) (((x) >> 0) & 0xff)
-#define AMD_CPPC_LOWNONLIN_PERF(x) (((x) >> 8) & 0xff)
-#define AMD_CPPC_NOMINAL_PERF(x) (((x) >> 16) & 0xff)
-#define AMD_CPPC_HIGHEST_PERF(x) (((x) >> 24) & 0xff)
-
-#define AMD_CPPC_MAX_PERF(x) (((x) & 0xff) << 0)
-#define AMD_CPPC_MIN_PERF(x) (((x) & 0xff) << 8)
-#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
-#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+/* Masks for use with MSR_AMD_CPPC_CAP1 */
+#define AMD_CPPC_LOWEST_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_LOWNONLIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_NOMINAL_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_HIGHEST_PERF_MASK GENMASK(31, 24)
+
+/* Masks for use with MSR_AMD_CPPC_REQ */
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
/* AMD Performance Counter Global Status and Control MSRs */
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
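
The hunk above replaces the open-coded shift/mask macros with GENMASK()-based field masks, which the driver then pairs with FIELD_GET()/FIELD_PREP(). The sketch below uses minimal userspace stand-ins for those helpers and a made-up register value, only to show that the mask form extracts and builds the same fields as the removed macros.

  #include <stdint.h>
  #include <stdio.h>

  #define AMD_CPPC_HIGHEST_PERF_MASK 0xff000000u   /* GENMASK(31, 24) */
  #define AMD_CPPC_MIN_PERF_MASK     0x0000ff00u   /* GENMASK(15, 8) */

  /* Stand-ins for the kernel's FIELD_GET()/FIELD_PREP() for one 32-bit mask. */
  static uint32_t field_get(uint32_t mask, uint32_t reg)
  {
          return (reg & mask) / (mask & -mask);
  }

  static uint32_t field_prep(uint32_t mask, uint32_t val)
  {
          return (val * (mask & -mask)) & mask;
  }

  int main(void)
  {
          uint32_t cap1 = 0xaa112233;   /* made-up MSR_AMD_CPPC_CAP1 value */

          /* Old form: AMD_CPPC_HIGHEST_PERF(cap1) == ((cap1 >> 24) & 0xff) */
          printf("highest_perf = 0x%x\n",
                 field_get(AMD_CPPC_HIGHEST_PERF_MASK, cap1));    /* 0xaa */

          /* Old form: AMD_CPPC_MIN_PERF(42) == ((42 & 0xff) << 8) */
          printf("min_perf field = 0x%x\n",
                 field_prep(AMD_CPPC_MIN_PERF_MASK, 42));         /* 0x2a00 */

          return 0;
  }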
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index d745dd586303..77bfb846490c 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -4,6 +4,8 @@
* Copyright (c) 2016, Intel Corporation.
*/
+#include <linux/bitfield.h>
+
#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -149,7 +151,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
if (ret)
goto out;
- val = AMD_CPPC_HIGHEST_PERF(val);
+ val = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val);
} else {
ret = cppc_get_highest_perf(cpu, &val);
if (ret)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 9e46960f6a86..4f9cb943d945 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -254,7 +254,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
default y
help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 463b69a2dff5..924314cdeebc 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -909,6 +909,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost)
+ policy->boost_supported = true;
+
return result;
err_unreg:
@@ -949,7 +952,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
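
The acpi-cpufreq hunks above reflect the "manage sysfs attributes and boost from the cpufreq core" theme from the commit message: the driver now only flags boost support in ->init() and drops the scaling_available_freqs attribute from its own list. The sketch below is a generic userspace illustration of that division of labor (invented names, not the cpufreq API): the driver advertises a capability, and the core decides once whether to expose the knob.

  #include <stdbool.h>
  #include <stdio.h>

  struct policy {
          bool boost_supported;
  };

  struct driver {
          int (*init)(struct policy *p);
          int (*set_boost)(struct policy *p, int state);
  };

  static int demo_set_boost(struct policy *p, int state)
  {
          printf("boost %s\n", state ? "enabled" : "disabled");
          return 0;
  }

  static int demo_init(struct policy *p)
  {
          /* Mirrors the hunk: flag support during init, nothing else. */
          p->boost_supported = true;
          return 0;
  }

  static struct driver demo = { .init = demo_init, .set_boost = demo_set_boost };

  int main(void)
  {
          struct policy pol = { 0 };

          demo.init(&pol);
          if (pol.boost_supported && demo.set_boost)   /* core-side check, once */
                  demo.set_boost(&pol, 1);

          return 0;
  }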
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 8d692415d905..32e1bdc588c5 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,9 +24,9 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf,
- unsigned long target_perf,
- unsigned long capacity,
+ TP_PROTO(u8 min_perf,
+ u8 target_perf,
+ u8 capacity,
u64 freq,
u64 mperf,
u64 aperf,
@@ -47,9 +47,9 @@ TRACE_EVENT(amd_pstate_perf,
),
TP_STRUCT__entry(
- __field(unsigned long, min_perf)
- __field(unsigned long, target_perf)
- __field(unsigned long, capacity)
+ __field(u8, min_perf)
+ __field(u8, target_perf)
+ __field(u8, capacity)
__field(unsigned long long, freq)
__field(unsigned long long, mperf)
__field(unsigned long long, aperf)
@@ -70,10 +70,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
- (unsigned long)__entry->min_perf,
- (unsigned long)__entry->target_perf,
- (unsigned long)__entry->capacity,
+ TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+ (u8)__entry->min_perf,
+ (u8)__entry->target_perf,
+ (u8)__entry->capacity,
(unsigned long long)__entry->freq,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
@@ -86,11 +86,12 @@ TRACE_EVENT(amd_pstate_perf,
TRACE_EVENT(amd_pstate_epp_perf,
TP_PROTO(unsigned int cpu_id,
- unsigned int highest_perf,
- unsigned int epp,
- unsigned int min_perf,
- unsigned int max_perf,
- bool boost
+ u8 highest_perf,
+ u8 epp,
+ u8 min_perf,
+ u8 max_perf,
+ bool boost,
+ bool changed
),
TP_ARGS(cpu_id,
@@ -98,15 +99,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
epp,
min_perf,
max_perf,
- boost),
+ boost,
+ changed),
TP_STRUCT__entry(
__field(unsigned int, cpu_id)
- __field(unsigned int, highest_perf)
- __field(unsigned int, epp)
- __field(unsigned int, min_perf)
- __field(unsigned int, max_perf)
+ __field(u8, highest_perf)
+ __field(u8, epp)
+ __field(u8, min_perf)
+ __field(u8, max_perf)
__field(bool, boost)
+ __field(bool, changed)
),
TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
__entry->min_perf = min_perf;
__entry->max_perf = max_perf;
__entry->boost = boost;
+ __entry->changed = changed;
),
- TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
(unsigned int)__entry->cpu_id,
- (unsigned int)__entry->min_perf,
- (unsigned int)__entry->max_perf,
- (unsigned int)__entry->highest_perf,
- (unsigned int)__entry->epp,
- (bool)__entry->boost
+ (u8)__entry->min_perf,
+ (u8)__entry->max_perf,
+ (u8)__entry->highest_perf,
+ (u8)__entry->epp,
+ (bool)__entry->boost,
+ (bool)__entry->changed
)
);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 3a0a380c3590..e671bc7d1550 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -22,39 +22,31 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
+#include <linux/cleanup.h>
#include <acpi/cppc_acpi.h>
#include "amd-pstate.h"
-/*
- * Abbreviations:
- * amd_pstate_ut: used as a shortform for AMD P-State unit test.
- * It helps to keep variable names smaller, simpler
- */
-enum amd_pstate_ut_result {
- AMD_PSTATE_UT_RESULT_PASS,
- AMD_PSTATE_UT_RESULT_FAIL,
-};
struct amd_pstate_ut_struct {
const char *name;
- void (*func)(u32 index);
- enum amd_pstate_ut_result result;
+ int (*func)(u32 index);
};
/*
* Kernel module for testing the AMD P-State unit test
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
-static void amd_pstate_ut_check_driver(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
@@ -77,71 +69,67 @@ static bool get_shared_mem(void)
/*
* check the _CPC object is present in SBIOS.
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
- if (acpi_cpc_valid())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!acpi_cpc_valid()) {
pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ return -EINVAL;
}
+
+ return 0;
}
-static void amd_pstate_ut_pstate_enable(u32 index)
+/*
+ * check if amd pstate is enabled
+ */
+static int amd_pstate_ut_check_enabled(u32 index)
{
- int ret = 0;
u64 cppc_enable = 0;
+ int ret;
+
+ if (get_shared_mem())
+ return 0;
ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
- return;
+ return ret;
}
- if (cppc_enable)
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+
+ if (!cppc_enable) {
pr_err("%s amd pstate must be enabled!\n", __func__);
+ return -EINVAL;
}
-}
-/*
- * check if amd pstate is enabled
- */
-static void amd_pstate_ut_check_enabled(u32 index)
-{
- if (get_shared_mem())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else
- amd_pstate_ut_pstate_enable(index);
+ return 0;
}
/*
* check if performance values are reasonable.
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
*/
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
{
int cpu = 0, ret = 0;
u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
u64 cap1 = 0;
struct cppc_perf_caps cppc_perf;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
+ union perf_cached cur_perf;
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
- for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
if (get_shared_mem()) {
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
highest_perf = cppc_perf.highest_perf;
@@ -151,50 +139,44 @@ static void amd_pstate_ut_check_perf(u32 index)
} else {
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
- nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
- lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
- lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+ nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
}
- if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ cur_perf = READ_ONCE(cpudata->perf);
+ if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
- __func__, cpu, highest_perf, cpudata->highest_perf);
- goto skip_test;
+ __func__, cpu, highest_perf, cur_perf.highest_perf);
+ return -EINVAL;
}
- if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
- (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
- (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (nominal_perf != cur_perf.nominal_perf ||
+ (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+ (lowest_perf != cur_perf.lowest_perf)) {
pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, nominal_perf, cpudata->nominal_perf,
- lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
- lowest_perf, cpudata->lowest_perf);
- goto skip_test;
+ __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+ lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+ lowest_perf, cur_perf.lowest_perf);
+ return -EINVAL;
}
if (!((highest_perf >= nominal_perf) &&
(nominal_perf > lowest_nonlinear_perf) &&
- (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_nonlinear_perf >= lowest_perf) &&
(lowest_perf > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
/*
@@ -202,59 +184,50 @@ skip_test:
* max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
* check max freq when set support boost mode.
*/
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
{
int cpu = 0;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
- (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
- (cpudata->min_freq > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+ (policy->cpuinfo.min_freq > 0))) {
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
- cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- goto skip_test;
+ __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+ return -EINVAL;
}
if (cpudata->lowest_nonlinear_freq != policy->min) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
__func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
- goto skip_test;
+ return -EINVAL;
}
if (cpudata->boost_supported) {
- if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if ((policy->max != policy->cpuinfo.max_freq) &&
+ (policy->max != cpudata->nominal_freq)) {
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
- __func__, cpu, policy->max, cpudata->max_freq,
+ __func__, cpu, policy->max, policy->cpuinfo.max_freq,
cpudata->nominal_freq);
- goto skip_test;
+ return -EINVAL;
}
} else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
static int amd_pstate_set_mode(enum amd_pstate_mode mode)
@@ -266,32 +239,28 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
return amd_pstate_update_status(mode_str, strlen(mode_str));
}
-static void amd_pstate_ut_check_driver(u32 index)
+static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
- int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- ret = amd_pstate_set_mode(mode1);
+ int ret = amd_pstate_set_mode(mode1);
if (ret)
- goto out;
+ return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret)
- goto out;
+ if (ret) {
+ pr_err("%s: failed to update status for %s->%s\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2));
+ return ret;
+ }
}
}
-out:
- if (ret)
- pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2), ret);
-
- amd_pstate_ut_cases[index].result = ret ?
- AMD_PSTATE_UT_RESULT_FAIL :
- AMD_PSTATE_UT_RESULT_PASS;
+
+ return 0;
}
static int __init amd_pstate_ut_init(void)
@@ -299,16 +268,12 @@ static int __init amd_pstate_ut_init(void)
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
for (i = 0; i < arr_size; i++) {
- amd_pstate_ut_cases[i].func(i);
- switch (amd_pstate_ut_cases[i].result) {
- case AMD_PSTATE_UT_RESULT_PASS:
+ int ret = amd_pstate_ut_cases[i].func(i);
+
+ if (ret)
+ pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+ else
pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- case AMD_PSTATE_UT_RESULT_FAIL:
- default:
- pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- }
}
return 0;
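
The unit-test rework above leans on scope-based cleanup: "struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;" drops the reference automatically on every exit path, so the skip_test/cpufreq_cpu_put() bookkeeping disappears. The userspace sketch below shows the same mechanism (the compiler cleanup attribute, which the kernel's cleanup.h wraps); the buffer/free pairing here is only an analogy, not the cpufreq code.

  #include <stdio.h>
  #include <stdlib.h>

  static void free_buf(char **p)
  {
          free(*p);
          printf("buffer released on scope exit\n");
  }

  /* Userspace stand-in for the kernel's __free() from <linux/cleanup.h>. */
  #define __free(fn) __attribute__((cleanup(fn)))

  int main(void)
  {
          for (int i = 0; i < 2; i++) {
                  char *buf __free(free_buf) = NULL;

                  buf = malloc(32);
                  if (!buf)
                          continue;       /* no explicit free needed on any path */

                  snprintf(buf, 32, "iteration %d", i);
                  puts(buf);
          }

          return 0;
  }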
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 313550fa62d4..6789eed1bb5b 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -85,15 +85,9 @@ static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
-static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
-#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
-#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
-#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
-#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
-
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -142,6 +136,19 @@ static struct quirk_entry quirk_amd_7k62 = {
.lowest_freq = 550,
};
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
+
static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
/**
@@ -183,10 +190,9 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 msr_get_epp(struct amd_cpudata *cpudata)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
u64 value;
int ret;
@@ -207,7 +213,7 @@ static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
return static_call(amd_pstate_get_epp)(cpudata);
}
-static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
u64 epp;
int ret;
@@ -218,12 +224,13 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -235,6 +242,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -249,24 +268,24 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- WRITE_ONCE(cpudata->epp_cached, epp);
return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, u32 epp,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
bool fast_switch)
{
- return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
max_perf, epp, fast_switch);
}
-static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
int ret;
@@ -274,6 +293,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
value &= ~AMD_CPPC_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -284,7 +316,6 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
}
/* update both so that msr_update_perf() can effectively check */
- WRITE_ONCE(cpudata->epp_cached, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
@@ -292,17 +323,35 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- return static_call(amd_pstate_set_epp)(cpudata, epp);
+ return static_call(amd_pstate_set_epp)(policy, epp);
}
-static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- int ret;
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
+
+
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
+ }
- if (epp == cpudata->epp_cached)
+ if (epp == epp_cached)
return 0;
perf_ctrls.energy_perf = epp;
@@ -311,106 +360,35 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
- WRITE_ONCE(cpudata->epp_cached, epp);
- return ret;
-}
-
-static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
- int pref_index)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- int epp;
-
- if (!pref_index)
- epp = cpudata->epp_default;
- else
- epp = epp_values[pref_index];
-
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("EPP cannot be set under performance policy\n");
- return -EBUSY;
- }
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- epp,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
- policy->boost_enabled);
- }
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return ret;
}
-static inline int msr_cppc_enable(bool enable)
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- int ret, cpu;
- unsigned long logical_proc_id_mask = 0;
-
- /*
- * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
- */
- if (!enable)
- return 0;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_package_id(cpu);
-
- if (test_bit(logical_id, &logical_proc_id_mask))
- continue;
-
- set_bit(logical_id, &logical_proc_id_mask);
-
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
- enable);
- if (ret)
- return ret;
- }
-
- cppc_enabled = enable;
- return 0;
+ return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
-static int shmem_cppc_enable(bool enable)
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
{
- int cpu, ret = 0;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
- if (ret)
- return ret;
-
- /* Enable autonomous mode for EPP */
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- /* Set desired perf as zero to allow EPP firmware control */
- perf_ctrls.desired_perf = 0;
- ret = cppc_set_perf(cpu, &perf_ctrls);
- if (ret)
- return ret;
- }
- }
-
- cppc_enabled = enable;
- return ret;
+ return cppc_set_enable(policy->cpu, 1);
}
DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_cppc_enable(bool enable)
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
- return static_call(amd_pstate_cppc_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(policy);
}
static int msr_init_perf(struct amd_cpudata *cpudata)
{
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
@@ -422,19 +400,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
- WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
+
return 0;
}
static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -445,14 +426,14 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -479,23 +460,56 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
if (cppc_state == AMD_PSTATE_ACTIVE) {
- int ret = shmem_set_epp(cpudata, epp);
+ int ret = shmem_set_epp(policy, epp);
if (ret)
return ret;
}
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- return cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -531,17 +545,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- unsigned long max_freq;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ if (!policy)
+ return;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
- max_freq = READ_ONCE(cpudata->max_limit_freq);
- policy->cur = div_u64(des_perf * max_freq, max_perf);
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
@@ -550,7 +565,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
- max_perf = min_t(unsigned long, nominal_perf, max_perf);
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -558,9 +573,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->cpu, fast_switch);
}
- amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
-
- cpufreq_cpu_put(policy);
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -572,7 +585,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
* amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
if (!policy)
@@ -580,58 +594,48 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
cpudata = policy->driver_data;
policy_data->min = cpudata->lowest_nonlinear_freq;
- cpufreq_cpu_put(policy);
}
cpufreq_verify_within_cpu_limits(policy_data);
- pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
-static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
- max_limit_perf = div_u64(policy->max * max_perf, max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, max_freq);
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
-
- return 0;
+ WRITE_ONCE(cpudata->perf, perf);
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
@@ -642,8 +646,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, fast_switch, policy->governor->flags);
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
@@ -671,10 +676,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return;
@@ -684,40 +689,38 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
-
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cpudata->max_limit_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
- cpufreq_cpu_put(policy);
}
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u32 nominal_freq, max_freq;
int ret = 0;
nominal_freq = READ_ONCE(cpudata->nominal_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
if (on)
policy->cpuinfo.max_freq = max_freq;
@@ -744,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
refresh_frequency_limits(policy);
@@ -821,28 +823,21 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = NULL;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
- int ret;
bool highest_perf_changed = false;
if (!amd_pstate_prefcore)
return;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- cpudata = policy->driver_data;
-
- guard(mutex)(&amd_pstate_driver_lock);
-
- ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret) {
- cpufreq_cpu_put(policy);
+ if (amd_get_highest_perf(cpu, &cur_high))
return;
- }
+
+ cpudata = policy->driver_data;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -852,11 +847,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
- cpufreq_cpu_put(policy);
-
- if (!highest_perf_changed)
- cpufreq_update_policy(cpu);
-
}
/*
@@ -894,48 +884,45 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
}
/*
- * amd_pstate_init_freq: Initialize the max_freq, min_freq,
- * nominal_freq and lowest_nonlinear_freq for
- * the @cpudata object.
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
*
- * Requires: highest_perf, lowest_perf, nominal_perf and
- * lowest_nonlinear_perf members of @cpudata to be
- * initialized.
+ * Requires: all perf members of @cpudata to be initialized.
*
- * Returns 0 on success, non-zero value on failure.
+ * Returns 0 on success, non-zero value on failure.
*/
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- int ret;
- u32 min_freq, max_freq;
- u32 highest_perf, nominal_perf, nominal_freq;
- u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
+ int ret;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
-
- if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq;
- else
- min_freq = cppc_perf.lowest_freq;
+ perf = READ_ONCE(cpudata->perf);
if (quirks && quirks->nominal_freq)
nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ nominal_freq *= 1000;
+
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
+ min_freq *= 1000;
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
- WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
- WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
/**
* Below values need to be initialized correctly, otherwise driver will fail to load
@@ -960,9 +947,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -993,19 +981,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
- policy->min = min_freq;
- policy->max = max_freq;
+ perf = READ_ONCE(cpudata->perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
@@ -1027,9 +1019,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- cpudata->max_limit_freq = max_freq;
- cpudata->min_limit_freq = min_freq;
-
policy->driver_data = cpudata;
if (!current_pstate_driver->adjust_perf)
@@ -1040,6 +1029,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1054,28 +1044,6 @@ static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
kfree(cpudata);
}
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
-}
-
/* Sysfs attributes */
/*
@@ -1086,27 +1054,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = READ_ONCE(cpudata->max_freq);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -1116,18 +1084,17 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
- perf = READ_ONCE(cpudata->highest_perf);
+ cpudata = policy->driver_data;
- return sysfs_emit(buf, "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
}
static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ u8 perf;
struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->prefcore_ranking);
@@ -1168,8 +1135,10 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
+ u8 epp;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
@@ -1179,9 +1148,17 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- guard(mutex)(&amd_pstate_limits_lock);
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
- ret = amd_pstate_set_energy_pref_index(policy, ret);
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
return ret ? ret : count;
}
@@ -1190,9 +1167,11 @@ static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int preference;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- switch (cpudata->epp_cached) {
+ switch (epp) {
case AMD_CPPC_EPP_PERFORMANCE:
preference = EPP_INDEX_PERFORMANCE;
break;
@@ -1214,7 +1193,6 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1248,14 +1226,6 @@ static int amd_pstate_register_driver(int mode)
cppc_state = mode;
- ret = amd_pstate_cppc_enable(true);
- if (ret) {
- pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
- ret);
- amd_pstate_driver_cleanup();
- return ret;
- }
-
/* at least one CPU supports CPB */
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
@@ -1353,8 +1323,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
@@ -1375,7 +1347,6 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
return ret < 0 ? ret : count;
@@ -1451,10 +1422,11 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
struct device *dev;
u64 value;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -1485,20 +1457,25 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ perf = READ_ONCE(cpudata->perf);
+
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
+ policy->driver_data = cpudata;
+
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- policy->driver_data = cpudata;
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/*
* Set the policy to provide a valid fallback value in case
@@ -1518,13 +1495,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
- ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1533,6 +1505,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return 0;
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1552,24 +1525,21 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 epp;
+ union perf_cached perf;
+ u8 epp;
- amd_pstate_update_min_max_limit(policy);
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
else
- epp = READ_ONCE(cpudata->epp_cached);
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
- cpudata->min_limit_perf,
- cpudata->max_limit_perf,
- policy->boost_enabled);
- }
+ perf = READ_ONCE(cpudata->perf);
- return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, epp, false);
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1580,9 +1550,6 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
- policy->cpuinfo.max_freq, policy->max);
-
cpudata->policy = policy->policy;
ret = amd_pstate_epp_update_limit(policy);
@@ -1598,82 +1565,28 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- u64 max_perf;
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
-
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- cpudata->epp_cached,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- max_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
-}
-
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
+ pr_debug("AMD CPU Core %d going online\n", policy->cpu);
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
-
- ret = amd_pstate_epp_reenable(policy);
- if (ret)
- return ret;
- cpudata->suspended = false;
-
- return 0;
+ return amd_pstate_cppc_enable(policy);
}
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int min_perf;
-
- if (cpudata->suspended)
- return 0;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
-
- guard(mutex)(&amd_pstate_limits_lock);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE,
- min_perf, min_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
+ return 0;
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
- /* avoid suspending when EPP is not enabled */
- if (cppc_state != AMD_PSTATE_ACTIVE)
- return 0;
+ /* invalidate to ensure it's rewritten during resume */
+ cpudata->cppc_req_cached = 0;
/* set this flag to avoid setting core offline*/
cpudata->suspended = true;
- /* disable CPPC in lowlevel firmware */
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to suspend, return %d\n", ret);
-
return 0;
}
@@ -1682,10 +1595,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- guard(mutex)(&amd_pstate_limits_lock);
+ int ret;
/* enable amd pstate from suspend state*/
- amd_pstate_epp_reenable(policy);
+ ret = amd_pstate_epp_update_limit(policy);
+ if (ret)
+ return ret;
cpudata->suspended = false;
}
@@ -1700,8 +1615,6 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1868,7 +1781,6 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
- amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
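The EPP show/store paths above now read the preference straight out of the cached CPPC request word with FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached) instead of a separate epp_cached field. As a rough standalone illustration of that mask-based extraction (illustrative mask and value only, not the real register layout):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 8-bit field mask; the driver's AMD_CPPC_EPP_PERF_MASK may
 * sit at different bit positions. */
#define DEMO_EPP_MASK	0xff000000ULL

/* Shift by the mask's lowest set bit, then keep only the field width --
 * the same arithmetic the FIELD_GET() helper performs. */
static uint8_t demo_field_get(uint64_t mask, uint64_t reg)
{
	return (uint8_t)((reg & mask) >> __builtin_ctzll(mask));
}

int main(void)
{
	uint64_t cppc_req_cached = 0xc4000000ULL;	/* fabricated cached request */

	printf("epp = %u\n", demo_field_get(DEMO_EPP_MASK, cppc_req_cached));	/* 196 */
	return 0;
}
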
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index 9747e3be6cee..fbe1c08d3f06 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -13,6 +13,36 @@
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that support the preferred core feature, the highest_perf value may be
+ * configured to any value in the range 166-255 by the firmware (because the preferred
+ * core ranking is encoded in the highest_perf value). To maintain consistency across
+ * all platforms, we split the highest_perf and preferred core ranking values into
+ * cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ */
+union perf_cached {
+ struct {
+ u8 highest_perf;
+ u8 nominal_perf;
+ u8 lowest_nonlinear_perf;
+ u8 lowest_perf;
+ u8 min_limit_perf;
+ u8 max_limit_perf;
+ };
+ u64 val;
+};
+
/**
* struct amd_aperf_mperf
* @aperf: actual performance frequency clock count
@@ -30,24 +60,11 @@ struct amd_aperf_mperf {
* @cpu: CPU number
* @req: constraint request to apply
* @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
+ * @perf: cached performance-related data
* @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
* priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
* @min_limit_freq: Cached value of policy->min (in khz)
* @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
* @nominal_freq: the frequency (in khz) that mapped to nominal_perf
* @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
@@ -59,7 +76,6 @@ struct amd_aperf_mperf {
 * AMD P-State driver supports preferred core feature.
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
@@ -70,18 +86,11 @@ struct amd_cpudata {
struct freq_qos_request req[2];
u64 cppc_req_cached;
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
- u32 prefcore_ranking;
- u32 min_limit_perf;
- u32 max_limit_perf;
- u32 min_limit_freq;
- u32 max_limit_freq;
+ union perf_cached perf;
- u32 max_freq;
- u32 min_freq;
+ u8 prefcore_ranking;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 nominal_freq;
u32 lowest_nonlinear_freq;
@@ -93,11 +102,9 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_cached;
u32 policy;
- u64 cppc_cap1_cached;
bool suspended;
- s16 epp_default;
+ u8 epp_default;
};
/*
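The union perf_cached introduced above overlays six u8 performance levels on a single u64, so the whole set is read or written through one READ_ONCE()/WRITE_ONCE() of cpudata->perf instead of several independent u32 loads. A minimal standalone sketch of that packing (plain C, not the driver code itself):

#include <stdint.h>
#include <stdio.h>

union perf_cached_demo {
	struct {
		uint8_t highest_perf;
		uint8_t nominal_perf;
		uint8_t lowest_nonlinear_perf;
		uint8_t lowest_perf;
		uint8_t min_limit_perf;
		uint8_t max_limit_perf;
	};
	uint64_t val;
};

int main(void)
{
	union perf_cached_demo perf = { .val = 0 };

	/* Fabricated example levels. */
	perf.highest_perf = 228;
	perf.nominal_perf = 166;
	perf.lowest_nonlinear_perf = 90;
	perf.lowest_perf = 20;

	/* All six fields travel in one 64-bit word; in the driver this is
	 * what a single READ_ONCE(cpudata->perf) hands back. */
	printf("packed value: %#llx\n", (unsigned long long)perf.val);
	return 0;
}
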
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 269b18c62d04..4994c86feb57 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -229,12 +229,6 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
return 0;
}
-static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Filled in below if boost is enabled */
- NULL,
-};
-
static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, i;
@@ -316,16 +310,6 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
policy->suspend_freq = freq_table[0].frequency;
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- } else {
- apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- apple_soc_cpufreq_driver.boost_enabled = true;
- }
- }
-
return 0;
out_free_cpufreq_table:
@@ -360,7 +344,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.target_index = apple_soc_cpufreq_set_target,
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
- .attr = apple_soc_cpufreq_hw_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index bea41ccabf1f..f28a4435fba7 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -102,11 +102,7 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- /*
- * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
- * unstable because we do not know how to configure it properly.
- */
- /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 7a979db81f09..5a3545bd0d8d 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 17a4c174553d..36051880640b 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -150,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.get = bmips_cpufreq_get,
.init = bmips_cpufreq_init,
.exit = bmips_cpufreq_exit,
- .attr = cpufreq_generic_attr,
.name = BMIPS_CPUFREQ_PREFIX,
};
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 2fd0f6be6fa3..7b841a086acc 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -720,7 +720,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8f512448382f..b3d74f9adcf0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -34,8 +34,6 @@
*/
static LIST_HEAD(cpu_data_list);
-static bool boost_supported;
-
static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -653,7 +651,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
- boost_supported = true;
+ policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
@@ -791,11 +789,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
- if (!boost_supported) {
- pr_err("BOOST not supported by CPU or firmware\n");
- return -EINVAL;
- }
-
if (state)
policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 3a7c3372bda7..e80dd982a3e2 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,12 +36,6 @@ struct private_data {
static LIST_HEAD(priv_list);
-static struct freq_attr *cpufreq_dt_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Extra space for boost-attr if required */
- NULL,
-};
-
static struct private_data *cpufreq_dt_find_data(int cpu)
{
struct private_data *priv;
@@ -120,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- /* Support turbo/boost mode */
- if (policy_has_boost_freq(policy)) {
- /* This gets disabled by core on driver unregister */
- ret = cpufreq_enable_boost_support();
- if (ret)
- goto out_clk_put;
- cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- }
-
return 0;
-
-out_clk_put:
- clk_put(cpu_clk);
-
- return ret;
}
static int cpufreq_online(struct cpufreq_policy *policy)
@@ -169,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
- .attr = cpufreq_dt_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -303,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
if (ret)
goto err;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 30ffbddc7ece..70237d78d71f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -88,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -631,6 +632,9 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
+ if (!policy->boost_supported)
+ return -EINVAL;
+
if (policy->boost_enabled == enable)
return count;
@@ -1059,6 +1063,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1571,14 +1590,14 @@ static int cpufreq_online(unsigned int cpu)
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_driver->set_boost &&
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- policy->boost_enabled ? "enable" : "disable");
+ str_enable_disable(policy->boost_enabled));
policy->boost_enabled = !policy->boost_enabled;
}
}
@@ -2772,7 +2791,7 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
@@ -2791,8 +2810,9 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
@@ -2807,6 +2827,9 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ if (!policy->boost_supported)
+ continue;
+
policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
if (ret) {
@@ -2854,21 +2877,6 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index af44ee6a6430..1a7fcaf39cc9 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ /*
+ * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+ * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+ * off, where idle_time is calculated by the difference between
+ * time elapsed in jiffies and "busy time" obtained from CPU
+ * statistics. If a CPU is 100% busy, the time elapsed and busy
+ * time should grow by the same amount in two consecutive
+ * samples, but in practice there could be a tiny difference,
+ * making the accumulated idle time decrease sometimes. Hence,
+ * in this case, idle_time should be regarded as 0 in order to
+ * make the further process correct.
+ */
+ if (cur_idle_time > j_cdbs->prev_cpu_idle)
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ else
+ idle_time = 0;
+
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ } else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- if (time_elapsed >= idle_time) {
+ if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
- } else {
- /*
- * That can happen if idle_time is returned by
- * get_cpu_idle_time_jiffy(). In that case
- * idle_time is roughly equal to the difference
- * between time_elapsed and "busy time" obtained
- * from CPU statistics. Then, the "busy time"
- * can end up being greater than time_elapsed
- * (for example, if jiffies_64 and the CPU
- * statistics are updated by different CPUs),
- * so idle_time may in fact be negative. That
- * means, though, that the CPU was busy all
- * the time (on the rough average) during the
- * last sampling interval and 100 can be
- * returned as the load.
- */
- load = (int)idle_time < 0 ? 100 : 0;
- }
+ else
+ load = 0;
+
j_cdbs->prev_load = load;
}
- if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
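The dbs_update() hunks above drop the (int) casts and clamp the idle-time subtraction instead, because with unsigned arithmetic a "negative" idle time wraps to a huge value and corrupts the load calculation. A tiny standalone demonstration with made-up sample values:

#include <stdio.h>

int main(void)
{
	/* Fabricated samples where the accumulated idle counter moved
	 * backwards by one tick between two updates. */
	unsigned int prev_cpu_idle = 1000, cur_idle_time = 999;

	unsigned int wrapped = cur_idle_time - prev_cpu_idle;		/* wraps to UINT_MAX */
	unsigned int clamped = cur_idle_time > prev_cpu_idle ?
			       cur_idle_time - prev_cpu_idle : 0;	/* 0 */

	printf("wrapped idle_time: %u\n", wrapped);
	printf("clamped idle_time: %u\n", clamped);
	return 0;
}
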
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 8736be3a06ce..2c277eb3795a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
- .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 6e958b09e1b5..d23a97ba6478 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -376,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 4ce5eb35dc46..36494b855e41 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -194,7 +194,6 @@ static struct cpufreq_driver elanfreq_driver = {
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e80d912b8d..c03a91502f84 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,7 +14,7 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
-bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
@@ -27,7 +27,6 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-EXPORT_SYMBOL_GPL(policy_has_boost_freq);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -276,7 +275,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
@@ -288,13 +286,6 @@ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
-
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
@@ -367,6 +358,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
if (ret)
return ret;
+ /* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
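Most of the per-driver hunks in this series follow the pattern this freq_table change enables: delete the private freq_attr array and any cpufreq_enable_boost_support() call, and point .set_boost at the generic helper; the core then sets policy->boost_supported from the table and creates the scaling_available_frequencies / scaling_boost_frequencies files itself. A hedged sketch of what a converted frequency-table driver ends up declaring (the example_* callbacks are placeholders, not from this patch):

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_cpufreq_set_target,	/* placeholder */
	.get		= cpufreq_generic_get,
	.init		= example_cpufreq_init,		/* placeholder */
	/* No .attr array: the core now exposes the available and boost
	 * frequency attributes when a frequency table is present. */
	.set_boost	= cpufreq_boost_set_sw,
};
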
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index f3c99f378ad6..db1c88e9d3f9 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -207,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
- .attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9c4cc01fd51a..fa81054fa411 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -936,6 +936,8 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
+static bool no_cas __ro_after_init;
+
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
/*
* Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -1041,6 +1043,10 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
static void hybrid_init_cpu_capacity_scaling(bool refresh)
{
+ /* Bail out if enabling capacity-aware scheduling is prohibited. */
+ if (no_cas)
+ return;
+
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
@@ -3688,6 +3694,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
id = x86_match_cpu(hwp_support_ids);
if (id) {
hwp_forced = intel_pstate_hwp_is_enabled();
@@ -3743,15 +3758,6 @@ static int __init intel_pstate_init(void)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists()) {
- pr_info("P-states controlled by the platform\n");
- return -ENODEV;
- }
-
if (!hwp_active && hwp_only)
return -ENOTSUPP;
@@ -3835,6 +3841,9 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "no_cas"))
+ no_cas = true;
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 312f2654d1d5..24b285cbeb8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index bd6fe8638d39..68ccd73c8129 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -906,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = {
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index ed1a6dbad638..39a6c4315a60 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -91,7 +91,6 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index bd34bf0fafa5..1e8715ea1b77 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -299,15 +299,6 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret < 0) {
- pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
- return ret;
- }
- loongson3_cpufreq_driver.boost_enabled = true;
- }
-
return 0;
}
@@ -337,8 +328,8 @@ static struct cpufreq_driver loongson3_cpufreq_driver = {
.offline = loongson3_cpufreq_cpu_offline,
.get = loongson3_cpufreq_get,
.target_index = loongson3_cpufreq_target,
- .attr = cpufreq_generic_attr,
.verify = cpufreq_generic_frequency_table_verify,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 9252ebd60373..74f1b4c796e4 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -293,7 +293,6 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -304,7 +303,7 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
struct regulator *cpu_reg;
/* Make sure that all CPU supplies are available before proceeding. */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 663f61565cf7..f3f02c4b6888 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -618,7 +618,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_probe(struct platform_device *pdev)
@@ -632,7 +631,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
continue;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 7f3cfe668f30..2aad4c04673c 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 106220c0fd11..bbb01d93b54b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -147,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
- .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index ef0a3216a386..69c19233fcd4 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 5fc9cb480516..a3931349360f 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -245,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 6c9f0888a2a7..a22c22bd693a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 74ff6c47df29..80897ec8f00e 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f0a4a6c31204..99d2244e03b0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -253,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 4271446c8725..fb2197dc170f 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -667,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index a01170f7d01c..4e3ba6e68c32 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1143,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ae79d909943b..6094c530bf57 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -386,12 +386,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
-#define SCALING_BOOST_FREQS_ATTR_INDEX 2
-
static struct freq_attr *powernv_cpu_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
- &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -1128,9 +1124,7 @@ static int __init powernv_cpufreq_init(void)
goto out;
if (powernv_pstate_info.wof_enabled)
- powernv_cpufreq_driver.boost_enabled = true;
- else
- powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+ powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
@@ -1138,9 +1132,6 @@ static int __init powernv_cpufreq_init(void)
goto cleanup;
}
- if (powernv_pstate_info.wof_enabled)
- cpufreq_enable_boost_support();
-
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index b2e7e89feaac..8422704a3b10 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -306,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
struct of_phandle_args args;
int cpu, ret;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
continue;
@@ -566,12 +566,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret)
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- }
-
return qcom_cpufreq_hw_lmh_init(policy, index);
}
@@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
enable_irq(data->throttle_irq);
}
-static struct freq_attr *qcom_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- &cpufreq_freq_attr_scaling_boost_freqs,
- NULL
-};
-
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -615,8 +603,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
- .attr = qcom_cpufreq_hw_attr,
.ready = qcom_cpufreq_ready,
+ .set_boost = cpufreq_boost_set_sw,
};
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 3a8ed723a23e..54f8117103c8 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -489,7 +489,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
nvmem_cell_put(speedbin_nvmem);
}
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -543,7 +543,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -557,7 +557,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -568,7 +568,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index a37ce051236c..8d1f5ac59132 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -254,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 330c8d6cf93c..103d2519dff7 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -92,7 +92,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index b8fe758aeb01..c310aeebc8f3 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -104,7 +104,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
int cpu, tdomain;
struct device *tcpu_dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -171,12 +171,6 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
-static struct freq_attr *scmi_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
- NULL,
-};
-
static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
{
struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
@@ -303,17 +297,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- goto out_free_table;
- } else {
- scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- scmi_cpufreq_driver.boost_enabled = true;
- }
- }
-
ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
@@ -395,13 +378,13 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
+ .set_boost = cpufreq_boost_set_sw,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index cd89c1b9832c..17cda84f00df 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- u64 rate = policy->freq_table[index].frequency * 1000;
+ unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
- if (clk_get_rate(priv->clk) != rate)
+ if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
@@ -64,7 +65,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (domain < 0)
return domain;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -183,7 +184,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index aa74036d0420..9c0b01e00508 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -151,7 +151,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d8ab5b01d46d..707c71090cc3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -165,7 +165,6 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3fafedb983b5..3e6e85a92212 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -507,7 +507,6 @@ static struct cpufreq_driver centrino_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index f2076d72bf39..262cfbde9ca7 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = {
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0ce9d4b6dfcc..39265884c3f1 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 17d6a149f580..47d6840b3489 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -262,7 +262,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "speed%d", speed);
config.prop_name = name;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
@@ -288,7 +288,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
@@ -302,7 +302,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index c7761eb99f3c..cbabb726c664 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+ /* Set the same policy for all CPUs in a cluster */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
return 0;
}
@@ -123,7 +130,6 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static struct cpufreq_frequency_table *init_vhint_table(
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9055dd398e7f..9b4f516f313e 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -589,7 +589,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.exit = tegra194_cpufreq_exit,
.online = tegra194_cpufreq_online,
.offline = tegra194_cpufreq_offline,
- .attr = cpufreq_generic_attr,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 0f86cdb7ec8a..65fea47b82e6 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -471,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
- .attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index a050b3a6737f..7dd1b0c263c7 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -138,7 +138,7 @@ static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
cur_perf_domain = readl_relaxed(base + policy->cpu *
PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
continue;
@@ -265,7 +265,6 @@ static struct cpufreq_driver cpufreq_virt_driver = {
.verify = virt_cpufreq_verify_policy,
.target = virt_cpufreq_target,
.fast_switch = virt_cpufreq_fast_switch,
- .attr = cpufreq_generic_attr,
};
static int virt_cpufreq_driver_probe(struct platform_device *pdev)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7fe0981a7e46..a2ff535e8270 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -144,6 +144,9 @@ struct cpufreq_policy {
/* Per policy boost enabled flag. */
bool boost_enabled;
+ /* Per policy boost supported flag. */
+ bool boost_supported;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
@@ -210,6 +213,9 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+/* Scope-based cleanup macro for cpufreq_policy kobject reference counting */
+DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))
+
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
return cpumask_empty(policy->cpus);
@@ -778,10 +784,8 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
-int cpufreq_boost_trigger_state(int state);
bool cpufreq_boost_enabled(void);
-int cpufreq_enable_boost_support(void);
-bool policy_has_boost_freq(struct cpufreq_policy *policy);
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
@@ -1150,23 +1154,14 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
return 0;
}
#else
-static inline int cpufreq_boost_trigger_state(int state)
-{
- return 0;
-}
static inline bool cpufreq_boost_enabled(void)
{
return false;
}
-static inline int cpufreq_enable_boost_support(void)
+static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
- return -EINVAL;
-}
-
-static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
-{
- return false;
+ return -EOPNOTSUPP;
}
static inline int
@@ -1198,7 +1193,6 @@ void arch_set_freq_scale(const struct cpumask *cpus,
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
-extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
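The DEFINE_FREE(put_cpufreq_policy, ...) line added earlier in this header hooks cpufreq_cpu_put() into the scope-based cleanup helpers from linux/cleanup.h, so a caller can take a policy reference that is released automatically when the variable goes out of scope. A minimal usage sketch (the surrounding function is hypothetical, not part of this patch):

static int example_query_boost(unsigned int cpu)
{
	/* Reference dropped automatically on every return path below. */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return -ENODEV;

	return policy->boost_enabled ? 1 : 0;
}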