Diffstat (limited to 'drivers/platform/x86/intel')
-rw-r--r--  drivers/platform/x86/intel/Kconfig                                     |  11
-rw-r--r--  drivers/platform/x86/intel/Makefile                                    |   4
-rw-r--r--  drivers/platform/x86/intel/chtwc_int33fe.c                             |   6
-rw-r--r--  drivers/platform/x86/intel/hid.c                                       |   1
-rw-r--r--  drivers/platform/x86/intel/ifs/core.c                                  |  15
-rw-r--r--  drivers/platform/x86/intel/intel_plr_tpmi.c                            | 354
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c                                  | 262
-rw-r--r--  drivers/platform/x86/intel/pmc/pltdrv.c                                |  17
-rw-r--r--  drivers/platform/x86/intel/rst.c                                       |   1
-rw-r--r--  drivers/platform/x86/intel/smartconnect.c                              |   1
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c            |  75
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.h            |   3
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c          |   4
-rw-r--r--  drivers/platform/x86/intel/telemetry/debugfs.c                         |   4
-rw-r--r--  drivers/platform/x86/intel/telemetry/pltdrv.c                          |   4
-rw-r--r--  drivers/platform/x86/intel/tpmi.c                                      |  11
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.c                        | 235
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.h                        |  18
-rw-r--r--  drivers/platform/x86/intel/turbo_max_3.c                               |   4
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c  |  83
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h  |  13
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c    | 101
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c         | 108
-rw-r--r--  drivers/platform/x86/intel/vbtn.c                                      |   1
24 files changed, 1017 insertions, 319 deletions
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index e9dc0c021029..ad50bbabec61 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -192,10 +192,14 @@ config INTEL_SMARTCONNECT
This driver checks to determine whether the device has Intel Smart
Connect enabled, and if so disables it.
+config INTEL_TPMI_POWER_DOMAINS
+ tristate
+
config INTEL_TPMI
tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
depends on INTEL_VSEC
depends on X86_64
+ select INTEL_TPMI_POWER_DOMAINS
help
The Intel Topology Aware Register and PM Capsule Interface (TPMI),
provides enumerable MMIO interface for power management features.
@@ -205,6 +209,13 @@ config INTEL_TPMI
To compile this driver as a module, choose M here: the module will
be called intel_vsec_tpmi.
+config INTEL_PLR_TPMI
+ tristate "Intel SoC TPMI Power Limit Reasons driver"
+ depends on INTEL_TPMI
+ help
+ This driver provides the TPMI power limit reasons status information
+ via debugfs files.
+
config INTEL_TURBO_MAX_3
bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
depends on X86_64 && SCHED_MC_PRIO
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index c1d5fe05e3f3..74db065c82d6 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -52,6 +52,10 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
# TPMI drivers
intel_vsec_tpmi-y := tpmi.o
obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o
+obj-$(CONFIG_INTEL_PLR_TPMI) += intel_plr_tpmi.o
+
+intel_tpmi_power_domains-y := tpmi_power_domains.o
+obj-$(CONFIG_INTEL_TPMI_POWER_DOMAINS) += intel_tpmi_power_domains.o
# Intel Uncore drivers
intel-rst-y := rst.o
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 93f75ba1dafd..11503b1c85f3 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -270,7 +270,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ strscpy(board_info.type, "max17047");
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -361,7 +361,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ strscpy(board_info.type, "typec_fusb302");
board_info.dev_name = "fusb302";
board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
@@ -381,7 +381,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
board_info.fwnode = fwnode;
- strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+ strscpy(board_info.type, "pi3usb30532");
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
if (IS_ERR(data->pi3usb30532)) {
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index c7a827645864..10cd65497cc1 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -38,6 +38,7 @@ MODULE_PARM_DESC(enable_sw_tablet_mode,
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
+MODULE_DESCRIPTION("Intel HID Event hotkey driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 7b11198d85a1..33412a584836 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -11,16 +11,15 @@
#include "ifs.h"
-#define X86_MATCH(model, array_gen) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
- INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen)
+#define X86_MATCH(vfm, array_gen) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_CORE_CAPABILITIES, array_gen)
static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
- X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0),
- X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
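Note: several files in this series make the same INTEL_FAM6-to-VFM conversion seen above. A minimal, self-contained sketch of the new match-table idiom; all names below are illustrative and not taken from any one driver in this series:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define DEMO_CAP_HPM    0x01    /* illustrative per-model driver data */

static const struct x86_cpu_id demo_cpu_ids[] = {
        X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, DEMO_CAP_HPM), /* vendor+family+model in one token */
        X86_MATCH_VFM(INTEL_SKYLAKE_X, 0),
        {}
};

static int demo_detect(void)
{
        const struct x86_cpu_id *id = x86_match_cpu(demo_cpu_ids);

        if (!id)
                return -ENODEV;

        return id->driver_data & DEMO_CAP_HPM; /* per-model data travels with the match */
}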
diff --git a/drivers/platform/x86/intel/intel_plr_tpmi.c b/drivers/platform/x86/intel/intel_plr_tpmi.c
new file mode 100644
index 000000000000..69ace6a629bc
--- /dev/null
+++ b/drivers/platform/x86/intel/intel_plr_tpmi.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Performance Limit Reasons via TPMI
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/intel_tpmi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kstrtox.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+
+#include "tpmi_power_domains.h"
+
+#define PLR_HEADER 0x00
+#define PLR_MAILBOX_INTERFACE 0x08
+#define PLR_MAILBOX_DATA 0x10
+#define PLR_DIE_LEVEL 0x18
+
+#define PLR_MODULE_ID_MASK GENMASK_ULL(19, 12)
+#define PLR_RUN_BUSY BIT_ULL(63)
+
+#define PLR_COMMAND_WRITE 1
+
+#define PLR_INVALID GENMASK_ULL(63, 0)
+
+#define PLR_TIMEOUT_US 5
+#define PLR_TIMEOUT_MAX_US 1000
+
+#define PLR_COARSE_REASON_BITS 32
+
+struct tpmi_plr;
+
+struct tpmi_plr_die {
+ void __iomem *base;
+ struct mutex lock; /* Protect access to PLR mailbox */
+ int package_id;
+ int die_id;
+ struct tpmi_plr *plr;
+};
+
+struct tpmi_plr {
+ struct dentry *dbgfs_dir;
+ struct tpmi_plr_die *die_info;
+ int num_dies;
+ struct auxiliary_device *auxdev;
+};
+
+static const char * const plr_coarse_reasons[] = {
+ "FREQUENCY",
+ "CURRENT",
+ "POWER",
+ "THERMAL",
+ "PLATFORM",
+ "MCP",
+ "RAS",
+ "MISC",
+ "QOS",
+ "DFC",
+};
+
+static const char * const plr_fine_reasons[] = {
+ "FREQUENCY_CDYN0",
+ "FREQUENCY_CDYN1",
+ "FREQUENCY_CDYN2",
+ "FREQUENCY_CDYN3",
+ "FREQUENCY_CDYN4",
+ "FREQUENCY_CDYN5",
+ "FREQUENCY_FCT",
+ "FREQUENCY_PCS_TRL",
+ "CURRENT_MTPMAX",
+ "POWER_FAST_RAPL",
+ "POWER_PKG_PL1_MSR_TPMI",
+ "POWER_PKG_PL1_MMIO",
+ "POWER_PKG_PL1_PCS",
+ "POWER_PKG_PL2_MSR_TPMI",
+ "POWER_PKG_PL2_MMIO",
+ "POWER_PKG_PL2_PCS",
+ "POWER_PLATFORM_PL1_MSR_TPMI",
+ "POWER_PLATFORM_PL1_MMIO",
+ "POWER_PLATFORM_PL1_PCS",
+ "POWER_PLATFORM_PL2_MSR_TPMI",
+ "POWER_PLATFORM_PL2_MMIO",
+ "POWER_PLATFORM_PL2_PCS",
+ "UNKNOWN(22)",
+ "THERMAL_PER_CORE",
+ "DFC_UFS",
+ "PLATFORM_PROCHOT",
+ "PLATFORM_HOT_VR",
+ "UNKNOWN(27)",
+ "UNKNOWN(28)",
+ "MISC_PCS_PSTATE",
+};
+
+static u64 plr_read(struct tpmi_plr_die *plr_die, int offset)
+{
+ return readq(plr_die->base + offset);
+}
+
+static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset)
+{
+ writeq(val, plr_die->base + offset);
+}
+
+static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu,
+ u64 *status)
+{
+ u64 regval;
+ int ret;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY;
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+ if (ret)
+ return ret;
+
+ *status = plr_read(plr_die, PLR_MAILBOX_DATA);
+
+ return 0;
+}
+
+static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu)
+{
+ u64 regval;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE;
+
+ plr_write(0, plr_die, PLR_MAILBOX_DATA);
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+}
+
+static void plr_print_bits(struct seq_file *s, u64 val, int bits)
+{
+ const unsigned long mask[] = { BITMAP_FROM_U64(val) };
+ int bit, index;
+
+ for_each_set_bit(bit, mask, bits) {
+ const char *str = NULL;
+
+ if (bit < PLR_COARSE_REASON_BITS) {
+ if (bit < ARRAY_SIZE(plr_coarse_reasons))
+ str = plr_coarse_reasons[bit];
+ } else {
+ index = bit - PLR_COARSE_REASON_BITS;
+ if (index < ARRAY_SIZE(plr_fine_reasons))
+ str = plr_fine_reasons[index];
+ }
+
+ if (str)
+ seq_printf(s, " %s", str);
+ else
+ seq_printf(s, " UNKNOWN(%d)", bit);
+ }
+
+ if (!val)
+ seq_puts(s, " none");
+
+ seq_putc(s, '\n');
+}
+
+static int plr_status_show(struct seq_file *s, void *unused)
+{
+ struct tpmi_plr_die *plr_die = s->private;
+ int ret;
+ u64 val;
+
+ val = plr_read(plr_die, PLR_DIE_LEVEL);
+ seq_puts(s, "cpus");
+ plr_print_bits(s, val, 32);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ seq_printf(s, "cpu%d", cpu);
+ ret = plr_read_cpu_status(plr_die, cpu, &val);
+ if (ret) {
+ dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n",
+ cpu, ret);
+ return ret;
+ }
+
+ plr_print_bits(s, val, 64);
+ }
+
+ return 0;
+}
+
+static ssize_t plr_status_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct tpmi_plr_die *plr_die = s->private;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, count, &val);
+ if (ret)
+ return ret;
+
+ if (val != 0)
+ return -EINVAL;
+
+ plr_write(0, plr_die, PLR_DIE_LEVEL);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ plr_clear_cpu_status(plr_die, cpu);
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(plr_status);
+
+static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_tpmi_plat_info *plat_info;
+ struct dentry *dentry;
+ int i, num_resources;
+ struct resource *res;
+ struct tpmi_plr *plr;
+ void __iomem *base;
+ char name[16];
+ int err;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n");
+
+ dentry = tpmi_get_debugfs_dir(auxdev);
+ if (!dentry)
+ return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n");
+
+ num_resources = tpmi_get_resource_count(auxdev);
+ if (!num_resources)
+ return -EINVAL;
+
+ plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, sizeof(*plr->die_info),
+ GFP_KERNEL);
+ if (!plr->die_info)
+ return -ENOMEM;
+
+ plr->num_dies = num_resources;
+ plr->dbgfs_dir = debugfs_create_dir("plr", dentry);
+ plr->auxdev = auxdev;
+
+ for (i = 0; i < num_resources; i++) {
+ res = tpmi_get_resource_at_index(auxdev, i);
+ if (!res) {
+ err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n");
+ goto err;
+ }
+
+ base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ goto err;
+ }
+
+ plr->die_info[i].base = base;
+ plr->die_info[i].package_id = plat_info->package_id;
+ plr->die_info[i].die_id = i;
+ plr->die_info[i].plr = plr;
+ mutex_init(&plr->die_info[i].lock);
+
+ if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID)
+ continue;
+
+ snprintf(name, sizeof(name), "domain%d", i);
+
+ dentry = debugfs_create_dir(name, plr->dbgfs_dir);
+ debugfs_create_file("status", 0444, dentry, &plr->die_info[i],
+ &plr_status_fops);
+ }
+
+ auxiliary_set_drvdata(auxdev, plr);
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(plr->dbgfs_dir);
+ return err;
+}
+
+static void intel_plr_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev);
+
+ debugfs_remove_recursive(plr->dbgfs_dir);
+}
+
+static const struct auxiliary_device_id intel_plr_id_table[] = {
+ { .name = "intel_vsec.tpmi-plr" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table);
+
+static struct auxiliary_driver intel_plr_aux_driver = {
+ .id_table = intel_plr_id_table,
+ .remove = intel_plr_remove,
+ .probe = intel_plr_probe,
+};
+module_auxiliary_driver(intel_plr_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_TPMI);
+MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
+MODULE_DESCRIPTION("Intel TPMI PLR Driver");
+MODULE_LICENSE("GPL");
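Note: the status file created by the driver above can be exercised from user space. A rough sketch, assuming debugfs is mounted at /sys/kernel/debug; the TPMI instance name in the path is hypothetical and varies per device:

#include <stdio.h>

int main(void)
{
        /* hypothetical path; the real prefix depends on the TPMI device instance */
        const char *path = "/sys/kernel/debug/tpmi-0000:00:03.1/plr/domain0/status";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* "cpus ..." plus one "cpuN ..." line per CPU */
        fclose(f);

        /* writing "0" (as root) clears the latched reasons via plr_status_write() */
        f = fopen(path, "w");
        if (f) {
                fputs("0", f);
                fclose(f);
        }
        return 0;
}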
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 2ad2f8753e5d..01ae71c6df59 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -87,35 +87,26 @@ static int set_etr3(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- int err;
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
/* check if CF9 is locked */
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (reg & ETR3_CF9LOCK) {
- err = -EACCES;
- goto out_unlock;
- }
+ if (reg & ETR3_CF9LOCK)
+ return -EACCES;
/* write CF9 global reset bit */
reg |= ETR3_CF9GR;
pmc_core_reg_write(pmc, map->etr3_offset, reg);
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (!(reg & ETR3_CF9GR)) {
- err = -EIO;
- goto out_unlock;
- }
-
- err = 0;
+ if (!(reg & ETR3_CF9GR))
+ return -EIO;
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
static umode_t etr3_is_visible(struct kobject *kobj,
struct attribute *attr,
@@ -127,9 +118,8 @@ static umode_t etr3_is_visible(struct kobject *kobj,
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- mutex_lock(&pmcdev->lock);
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock)
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}
@@ -145,12 +135,10 @@ static ssize_t etr3_show(struct device *dev,
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
-
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- reg &= ETR3_CF9GR | ETR3_CF9LOCK;
-
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock) {
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+ }
return sysfs_emit(buf, "0x%08x", reg);
}
@@ -257,9 +245,9 @@ static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
}
}
-static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
- int idx;
+ unsigned int idx;
for (idx = 0; maps[idx]; idx++)
;/* Nothing */
@@ -272,8 +260,8 @@ static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
const char *str,
const struct pmc_bit_map **maps)
{
- int index, idx, len = 32, bit_mask, arr_size;
- u32 *lpm_regs;
+ unsigned int index, idx, len = 32, arr_size;
+ u32 bit_mask, *lpm_regs;
arr_size = pmc_core_lpm_get_arr_size(maps);
lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
@@ -326,13 +314,13 @@ static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
const struct pmc_bit_map **maps;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter, idx, ip = 0;
+ unsigned int index, iter, idx, ip = 0;
if (!pmc)
continue;
@@ -391,7 +379,8 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
const struct pmc_bit_map *map = pmc->map->mphy_sts;
u32 mphy_core_reg_low, mphy_core_reg_high;
u32 val_low, val_high;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -401,20 +390,18 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
+ if (err)
+ return err;
msleep(10);
val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
+ if (err)
+ return err;
msleep(10);
val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
@@ -433,9 +420,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
"Power gated");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
@@ -445,7 +430,8 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map *map = pmc->map->pll_sts;
u32 mphy_common_reg, val;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -453,12 +439,11 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
}
mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_common_reg);
+ if (err)
+ return err;
/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
msleep(10);
@@ -470,9 +455,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
map[index].bit_mask & val ? "Active" : "Idle");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
@@ -481,7 +464,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
struct pmc *pmc;
const struct pmc_reg_map *map;
u32 reg;
- int pmc_index, ltr_index;
+ unsigned int pmc_index;
+ int ltr_index;
ltr_index = value;
/* For platforms with multiple pmcs, ltr index value given by user
@@ -511,7 +495,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
if (ignore)
@@ -520,48 +504,56 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
reg &= ~BIT(ltr_index);
pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return 0;
}
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
- const char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
+ const char __user *userbuf,
+ size_t count, int ignore)
{
- struct seq_file *s = file->private_data;
- struct pmc_dev *pmcdev = s->private;
- u32 buf_size, value;
+ u32 value;
int err;
- buf_size = min_t(u32, count, 64);
-
- err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ err = kstrtou32_from_user(userbuf, count, 10, &value);
if (err)
return err;
- err = pmc_core_send_ltr_ignore(pmcdev, value, 1);
+ err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);
+
+ return err ?: count;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
- return err == 0 ? count : err;
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
return 0;
}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);
-static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+static ssize_t pmc_core_ltr_restore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}
-static const struct file_operations pmc_core_ltr_ignore_ops = {
- .open = pmc_core_ltr_ignore_open,
- .read = seq_read,
- .write = pmc_core_ltr_ignore_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
@@ -569,10 +561,10 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
const struct pmc_reg_map *map = pmc->map;
u32 fd;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
if (!reset && !slps0_dbg_latch)
- goto out_unlock;
+ return;
fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
if (reset)
@@ -582,9 +574,6 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
slps0_dbg_latch = false;
-
-out_unlock:
- mutex_unlock(&pmcdev->lock);
}
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
@@ -639,17 +628,29 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
u32 ltr_raw_data, scale, val;
u16 snoop_ltr, nonsnoop_ltr;
- int i, index, ltr_index = 0;
+ unsigned int i, index, ltr_index = 0;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ struct pmc *pmc;
const struct pmc_bit_map *map;
+ u32 ltr_ign_reg;
+ pmc = pmcdev->pmcs[i];
if (!pmc)
continue;
+ scoped_guard(mutex, &pmcdev->lock)
+ ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
+
map = pmc->map->ltr_show_sts;
for (index = 0; map[index].name; index++) {
+ bool ltr_ign_data;
+
+ if (index > pmc->map->ltr_ignore_max)
+ ltr_ign_data = false;
+ else
+ ltr_ign_data = ltr_ign_reg & BIT(index);
+
decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
ltr_raw_data = pmc_core_reg_read(pmc,
map[index].bit_mask);
@@ -667,10 +668,10 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
decoded_snoop_ltr = val * convert_ltr_scale(scale);
}
- seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
ltr_index, i, map[index].name, ltr_raw_data,
decoded_non_snoop_ltr,
- decoded_snoop_ltr);
+ decoded_snoop_ltr, ltr_ign_data);
ltr_index++;
}
}
@@ -727,7 +728,8 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
u32 offset = pmc->map->lpm_residency_offset;
- int i, mode;
+ unsigned int i;
+ int mode;
seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
@@ -743,7 +745,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -764,7 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -785,7 +787,8 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
struct pmc_dev *pmcdev = s->private;
- int i, mode;
+ unsigned int i;
+ int mode;
seq_printf(s, "%30s |", "Element");
pmc_for_each_mode(i, mode, pmcdev)
@@ -799,7 +802,8 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
struct pmc_dev *pmcdev = s->private;
u32 sts_offset;
u32 *lpm_req_regs;
- int num_maps, mp, pmc_index;
+ unsigned int mp, pmc_index;
+ int num_maps;
for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
struct pmc *pmc = pmcdev->pmcs[pmc_index];
@@ -921,9 +925,10 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ unsigned int idx;
bool c10;
u32 reg;
- int idx, mode;
+ int mode;
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
if (reg & LPM_STS_LATCH_MODE) {
@@ -955,7 +960,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
bool clear = false, c10 = false;
unsigned char buf[8];
- int idx, m, mode;
+ unsigned int idx;
+ int m, mode;
u32 reg;
if (count > sizeof(buf) - 1)
@@ -987,26 +993,22 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
}
if (clear) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
reg |= ETR3_CLEAR_LPM_EVENTS;
pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
if (c10) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
reg &= ~LPM_STS_LATCH_MODE;
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
@@ -1015,9 +1017,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
* and clear everything else.
*/
reg = LPM_STS_LATCH_MODE | BIT(mode);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
return count;
}
@@ -1028,7 +1029,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
struct pmc *pmc = s->private;
const struct pmc_bit_map *map = pmc->map->msr_sts;
u64 pcstate_count;
- int index;
+ unsigned int index;
for (index = 0; map[index].name ; index++) {
if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
@@ -1046,7 +1047,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
- int i, j;
+ unsigned int i, j;
if (!lpm_pri)
return false;
@@ -1081,7 +1082,8 @@ void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
u8 mode_order[LPM_MAX_NUM_MODES];
u32 lpm_pri;
u32 lpm_en;
- int mode, i, p;
+ unsigned int i;
+ int mode, p;
/* Use LPM Maps to indicate support for substates */
if (!pmc->map->lpm_num_maps)
@@ -1228,7 +1230,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
- &pmc_core_ltr_ignore_ops);
+ &pmc_core_ltr_ignore_fops);
+
+ debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);
debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
@@ -1293,29 +1297,29 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, arl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, lnl_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, icl_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE, cnp_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, adl_core_init),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, mtl_core_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, arl_core_init),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, lnl_core_init),
{}
};
@@ -1373,7 +1377,7 @@ static void pmc_core_do_dmi_quirks(struct pmc *pmc)
static void pmc_core_clean_structure(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -1536,7 +1540,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
int offset = pmc->map->lpm_status_offset;
- int i;
+ unsigned int i;
/* Check if the syspend used S0ix */
if (pm_suspend_via_firmware())
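Note: the pmc/core.c changes above drop the unlock labels by switching to scope-based locking from <linux/cleanup.h>. A minimal sketch of the idiom, independent of this driver:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_update(int val)
{
        guard(mutex)(&demo_lock);       /* released automatically at function exit */

        if (val < 0)
                return -EINVAL;         /* early return needs no unlock label */

        demo_state = val;
        return 0;
}

static int demo_read(void)
{
        int v;

        scoped_guard(mutex, &demo_lock) /* held only for this statement */
                v = demo_state;

        return v;
}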
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index ddfba38c2104..3141d6cbc41b 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -35,14 +35,14 @@ static struct platform_device *pmc_core_device;
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
@@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void)
module_init(pmc_core_platform_init);
module_exit(pmc_core_platform_exit);
+MODULE_DESCRIPTION("Intel PMC Core platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 6bc9c4a603e0..f3a60e14d4c1 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Intel Rapid Start Technology Driver");
MODULE_LICENSE("GPL");
static ssize_t irst_show_wakeup_events(struct device *dev,
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index cd25d0585324..31019a1a6d5e 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Intel Smart Connect disabling driver");
MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 713c0d1fa85f..10e21563fa46 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -718,14 +718,6 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
-static const struct x86_cpu_id hpm_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
- {}
-};
-
static int isst_misc_reg(void)
{
mutex_lock(&punit_misc_dev_reg_lock);
@@ -733,12 +725,6 @@ static int isst_misc_reg(void)
goto unlock_exit;
if (!misc_usage_count) {
- const struct x86_cpu_id *id;
-
- id = x86_match_cpu(hpm_cpu_ids);
- if (id)
- isst_hpm_support = true;
-
misc_device_ret = isst_if_cpu_info_init();
if (misc_device_ret)
goto unlock_exit;
@@ -786,11 +772,12 @@ static void isst_misc_unreg(void)
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- int ret;
-
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
+ if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
+ return -ENODEV;
+
mutex_lock(&punit_misc_dev_open_lock);
/* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
@@ -805,15 +792,6 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
punit_callbacks[device_type].registered = 1;
mutex_unlock(&punit_misc_dev_open_lock);
- ret = isst_misc_reg();
- if (ret) {
- /*
- * No need of mutex as the misc device register failed
- * as no one can open device yet. Hence no contention.
- */
- punit_callbacks[device_type].registered = 0;
- return ret;
- }
return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -829,7 +807,6 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- isst_misc_unreg();
mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].def_ioctl = NULL;
punit_callbacks[device_type].registered = 0;
@@ -839,5 +816,51 @@ void isst_if_cdev_unregister(int device_type)
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+#define SST_HPM_SUPPORTED 0x01
+#define SST_MBOX_SUPPORTED 0x02
+
+static const struct x86_cpu_id isst_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
+
+static int __init isst_if_common_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(isst_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == SST_HPM_SUPPORTED) {
+ isst_hpm_support = true;
+ } else if (id->driver_data == SST_MBOX_SUPPORTED) {
+ u64 data;
+
+ /* Can fail only on some Skylake-X generations */
+ if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+ rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+ return -ENODEV;
+ }
+
+ return isst_misc_reg();
+}
+module_init(isst_if_common_init)
+
+static void __exit isst_if_common_exit(void)
+{
+ isst_misc_unreg();
+}
+module_exit(isst_if_common_exit)
+
MODULE_DESCRIPTION("ISST common interface module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index 1004f2c9cca8..378055fe1d16 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -16,6 +16,9 @@
#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251
#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+
/*
* Validate maximum commands in a single request.
* This is enough to handle command to every core in one ioctl, or all
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
index 1b6eab071068..c4b7af00352b 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -21,8 +21,6 @@
#include "isst_if_common.h"
-#define MSR_OS_MAILBOX_INTERFACE 0xB0
-#define MSR_OS_MAILBOX_DATA 0xB1
#define MSR_OS_MAILBOX_BUSY_BIT 31
/*
@@ -161,7 +159,7 @@ static struct notifier_block isst_pm_nb = {
};
static const struct x86_cpu_id isst_if_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
index 1d4d0fbfd63c..70e5736c44c7 100644
--- a/drivers/platform/x86/intel/telemetry/debugfs.c
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -308,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
{}
};
MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
index 06311d0e9451..767a0bc6c7ad 100644
--- a/drivers/platform/x86/intel/telemetry/pltdrv.c
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -177,8 +177,8 @@ static struct telemetry_plt_config telem_glk_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_glk_config),
{}
};
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 6c0cbccd80bb..83e8b1fe53b3 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -357,6 +357,15 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev,
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
+
+ return tpmi_info->dbgfs_dir;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, INTEL_TPMI);
+
static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
struct intel_tpmi_info *tpmi_info = s->private;
@@ -577,6 +586,8 @@ static const char *intel_tpmi_name(enum intel_tpmi_id id)
return "uncore";
case TPMI_ID_SST:
return "sst";
+ case TPMI_ID_PLR:
+ return "plr";
default:
return NULL;
}
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
new file mode 100644
index 000000000000..4eb02553957c
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mapping of TPMI power domains CPU mapping
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
+
+#include "tpmi_power_domains.h"
+
+#define MSR_PM_LOGICAL_ID 0x54
+
+/*
+ * Struct of MSR 0x54
+ * [15:11] PM_DOMAIN_ID
+ * [10:3] MODULE_ID (aka IDI_AGENT_ID)
+ * [2:0] LP_ID
+ * For Atom:
+ * [2] Always 0
+ * [1:0] core ID within module
+ * For Core
+ * [2:1] Always 0
+ * [0] thread ID
+ */
+
+#define LP_ID_MASK GENMASK_ULL(2, 0)
+#define MODULE_ID_MASK GENMASK_ULL(10, 3)
+#define PM_DOMAIN_ID_MASK GENMASK_ULL(15, 11)
+
+/**
+ * struct tpmi_cpu_info - Mapping information for a CPU
+ * @hnode: Used to add mapping information to hash list
+ * @linux_cpu: Linux CPU number
+ * @pkg_id: Package ID of this CPU
+ * @punit_thread_id: Punit thread id of this CPU
+ * @punit_core_id: Punit core id
+ * @punit_domain_id: Power domain id from Punit
+ *
+ * Structure to store mapping information for a Linux CPU
+ * to a Punit core, thread and power domain.
+ */
+struct tpmi_cpu_info {
+ struct hlist_node hnode;
+ int linux_cpu;
+ u8 pkg_id;
+ u8 punit_thread_id;
+ u8 punit_core_id;
+ u8 punit_domain_id;
+};
+
+static DEFINE_PER_CPU(struct tpmi_cpu_info, tpmi_cpu_info);
+
+/* The dynamically assigned cpu hotplug state to free later */
+static enum cpuhp_state tpmi_hp_state __read_mostly;
+
+#define MAX_POWER_DOMAINS 8
+
+static cpumask_t *tpmi_power_domain_mask;
+
+/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */
+static DEFINE_MUTEX(tpmi_lock);
+
+static const struct x86_cpu_id tpmi_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids);
+
+static DECLARE_HASHTABLE(tpmi_cpu_hash, 8);
+
+static bool tpmi_domain_is_valid(struct tpmi_cpu_info *info)
+{
+ return info->pkg_id < topology_max_packages() &&
+ info->punit_domain_id < MAX_POWER_DOMAINS;
+}
+
+int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id)
+{
+ struct tpmi_cpu_info *info;
+ int ret = -EINVAL;
+
+ guard(mutex)(&tpmi_lock);
+ hash_for_each_possible(tpmi_cpu_hash, info, hnode, punit_core_id) {
+ if (info->punit_domain_id == domain_id && info->pkg_id == package_id) {
+ ret = info->linux_cpu;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_cpu_number, INTEL_TPMI_POWER_DOMAIN);
+
+int tpmi_get_punit_core_number(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_punit_core_number, INTEL_TPMI_POWER_DOMAIN);
+
+int tpmi_get_power_domain_id(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_id, INTEL_TPMI_POWER_DOMAIN);
+
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no)
+{
+ struct tpmi_cpu_info *info;
+ cpumask_t *mask;
+ int index;
+
+ if (cpu_no >= num_possible_cpus())
+ return NULL;
+
+ info = &per_cpu(tpmi_cpu_info, cpu_no);
+ if (!tpmi_domain_is_valid(info))
+ return NULL;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+ guard(mutex)(&tpmi_lock);
+ mask = &tpmi_power_domain_mask[index];
+
+ return mask;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, INTEL_TPMI_POWER_DOMAIN);
+
+static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
+{
+ u64 data;
+ int ret;
+
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ info->punit_domain_id = FIELD_GET(PM_DOMAIN_ID_MASK, data);
+ if (info->punit_domain_id >= MAX_POWER_DOMAINS)
+ return -EINVAL;
+
+ info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
+ info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
+ info->pkg_id = topology_physical_package_id(cpu);
+ info->linux_cpu = cpu;
+
+ return 0;
+}
+
+static int tpmi_cpu_online(unsigned int cpu)
+{
+ struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);
+ int ret, index;
+
+ /* Don't fail CPU online for some bad mapping of CPUs */
+ ret = tpmi_get_logical_id(cpu, info);
+ if (ret)
+ return 0;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+
+ guard(mutex)(&tpmi_lock);
+ cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]);
+ hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id);
+
+ return 0;
+}
+
+static int __init tpmi_init(void)
+{
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(tpmi_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check for MSR 0x54 presence */
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ tpmi_power_domain_mask = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
+ sizeof(*tpmi_power_domain_mask), GFP_KERNEL);
+ if (!tpmi_power_domain_mask)
+ return -ENOMEM;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/tpmi_power_domains:online",
+ tpmi_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(tpmi_power_domain_mask);
+ return ret;
+ }
+
+ tpmi_hp_state = ret;
+
+ return 0;
+}
+module_init(tpmi_init)
+
+static void __exit tpmi_exit(void)
+{
+ cpuhp_remove_state(tpmi_hp_state);
+ kfree(tpmi_power_domain_mask);
+}
+module_exit(tpmi_exit)
+
+MODULE_DESCRIPTION("TPMI Power Domains Mapping");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h
new file mode 100644
index 000000000000..e35750dd9273
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Mapping of TPMI power domain and CPUs
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#ifndef _TPMI_POWER_DOMAINS_H_
+#define _TPMI_POWER_DOMAINS_H_
+
+#include <linux/cpumask.h>
+
+int tpmi_get_linux_cpu_number(int package_id, int die_id, int punit_core_id);
+int tpmi_get_punit_core_number(int cpu_no);
+int tpmi_get_power_domain_id(int cpu_no);
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no);
+
+#endif
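Note: a hypothetical in-tree consumer of the interface declared above would look roughly like the sketch below; the PLR driver earlier in this series is the real first user.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "tpmi_power_domains.h"

static int __init pd_demo_init(void)
{
        int cpu = 0;    /* any CPU of interest */
        int domain = tpmi_get_power_domain_id(cpu);
        int core = tpmi_get_punit_core_number(cpu);

        if (domain < 0 || core < 0)
                return -ENODEV;

        pr_info("cpu%d -> power domain %d, punit core %d\n", cpu, domain, core);
        return 0;
}
module_init(pd_demo_init);

static void __exit pd_demo_exit(void)
{
}
module_exit(pd_demo_exit);

MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
MODULE_DESCRIPTION("Hypothetical tpmi_power_domains consumer");
MODULE_LICENSE("GPL");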
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
index 892140b62898..79a0bcdeffb8 100644
--- a/drivers/platform/x86/intel/turbo_max_3.c
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -114,8 +114,8 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
}
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 33bb58dc3f78..4e880585cbe4 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -19,9 +19,8 @@ static int uncore_instance_count;
static DEFINE_IDA(intel_uncore_ida);
/* callbacks for actual HW read/write */
-static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max);
-static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
-static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index);
+static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index);
static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
@@ -44,27 +43,22 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr
return sprintf(buf, "%u\n", data->package_id);
}
-static ssize_t show_min_max_freq_khz(struct uncore_data *data,
- char *buf, int min_max)
+static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index)
{
- unsigned int min, max;
+ unsigned int value;
int ret;
mutex_lock(&uncore_lock);
- ret = uncore_read(data, &min, &max);
+ ret = uncore_read(data, &value, index);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
- if (min_max)
- return sprintf(buf, "%u\n", max);
-
- return sprintf(buf, "%u\n", min);
+ return sprintf(buf, "%u\n", value);
}
-static ssize_t store_min_max_freq_khz(struct uncore_data *data,
- const char *buf, ssize_t count,
- int min_max)
+static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count,
+ enum uncore_index index)
{
unsigned int input;
int ret;
@@ -73,7 +67,7 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
return -EINVAL;
mutex_lock(&uncore_lock);
- ret = uncore_write(data, input, min_max);
+ ret = uncore_write(data, input, index);
mutex_unlock(&uncore_lock);
if (ret)
@@ -82,56 +76,32 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
return count;
}
-static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
-{
- unsigned int freq;
- int ret;
-
- mutex_lock(&uncore_lock);
- ret = uncore_read_freq(data, &freq);
- mutex_unlock(&uncore_lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", freq);
-}
-
-#define store_uncore_min_max(name, min_max) \
+#define store_uncore_attr(name, index) \
static ssize_t store_##name(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return store_min_max_freq_khz(data, buf, count, \
- min_max); \
+ return store_attr(data, buf, count, index); \
}
-#define show_uncore_min_max(name, min_max) \
+#define show_uncore_attr(name, index) \
static ssize_t show_##name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return show_min_max_freq_khz(data, buf, min_max); \
- }
-
-#define show_uncore_perf_status(name) \
- static ssize_t show_##name(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf)\
- { \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
- \
- return show_perf_status_freq_khz(data, buf); \
+ return show_attr(data, buf, index); \
}
-store_uncore_min_max(min_freq_khz, 0);
-store_uncore_min_max(max_freq_khz, 1);
+store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
-show_uncore_min_max(min_freq_khz, 0);
-show_uncore_min_max(max_freq_khz, 1);
+show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
-show_uncore_perf_status(current_freq_khz);
+show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ);
#define show_uncore_data(member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
@@ -198,7 +168,7 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
- ret = uncore_read_freq(data, &freq);
+ ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ);
if (!ret)
data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
@@ -238,7 +208,8 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu)
sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id);
}
- uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz);
+ uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+ uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ);
ret = create_attr_group(data, data->name);
if (ret) {
@@ -269,15 +240,15 @@ void uncore_freq_remove_die_entry(struct uncore_data *data)
}
EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY);
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max),
- int (*read_freq)(struct uncore_data *data, unsigned int *freq))
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index))
{
mutex_lock(&uncore_lock);
- uncore_read = read_control_freq;
- uncore_write = write_control_freq;
- uncore_read_freq = read_freq;
+ uncore_read = read;
+ uncore_write = write;
if (!uncore_root_kobj) {
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 0e5bf507e555..4c245b945e4e 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -66,9 +66,16 @@ struct uncore_data {
#define UNCORE_DOMAIN_ID_INVALID -1
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max),
- int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq));
+enum uncore_index {
+ UNCORE_INDEX_MIN_FREQ,
+ UNCORE_INDEX_MAX_FREQ,
+ UNCORE_INDEX_CURRENT_FREQ,
+};
+
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index));
void uncore_freq_common_exit(void);
int uncore_freq_add_entry(struct uncore_data *data, int cpu);
void uncore_freq_remove_die_entry(struct uncore_data *data);
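
For context on the header change above: backends now register one read and one write callback, both keyed by enum uncore_index, instead of three frequency-specific callbacks. Below is a minimal sketch of a backend written against the updated interface; my_read_reg() and my_write_reg() are hypothetical stand-ins for the real MMIO/MSR access and are not part of this patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

#include "uncore-frequency-common.h"

/* Hypothetical hardware accessors, standing in for MMIO/MSR reads/writes. */
static int my_read_reg(struct uncore_data *data, unsigned int *value,
		       enum uncore_index index);
static int my_write_reg(struct uncore_data *data, unsigned int input,
			enum uncore_index index);

static int my_uncore_read(struct uncore_data *data, unsigned int *value,
			  enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
	case UNCORE_INDEX_CURRENT_FREQ:
		return my_read_reg(data, value, index);
	default:
		return -EOPNOTSUPP;
	}
}

static int my_uncore_write(struct uncore_data *data, unsigned int input,
			   enum uncore_index index)
{
	return my_write_reg(data, input, index);
}

static int __init my_uncore_init(void)
{
	/* Register both callbacks with the common core in a single call. */
	return uncore_freq_common_init(my_uncore_read, my_uncore_write);
}

static void __exit my_uncore_exit(void)
{
	uncore_freq_common_exit();
}
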
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bb8e72deb354..9fa3037c03d1 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -69,26 +69,31 @@ struct tpmi_uncore_struct {
bool write_blocked;
};
-#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15)
-#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8)
-#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0)
+/* Bit definitions for STATUS register */
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+/* Bit definitions for CONTROL register */
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15)
/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
- unsigned int *min, unsigned int *max)
+ unsigned int *value, enum uncore_index index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}
-#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO)
+#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK)
-/* Callback for sysfs read for max/min frequencies. Called under mutex locks */
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -96,10 +101,11 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (cluster_info->root_domain) {
struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
- int i, _min = 0, _max = 0;
+ unsigned int min, max, v;
+ int i;
- *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
- *max = 0;
+ min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
+ max = 0;
/*
* Get the max/min by looking at each cluster. Get the lowest
@@ -110,35 +116,41 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- &_min, &_max);
- if (*min > _min)
- *min = _min;
- if (*max < _max)
- *max = _max;
+ &v, index);
+ if (v < min)
+ min = v;
+ if (v > max)
+ max = v;
}
}
+
+ if (index == UNCORE_INDEX_MIN_FREQ)
+ *value = min;
+ else
+ *value = max;
+
return 0;
}
- read_control_freq(cluster_info, min, max);
+ read_control_freq(cluster_info, value, index);
return 0;
}
/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
- unsigned int min_max)
+ unsigned int index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- if (min_max) {
- control &= ~UNCORE_GENMASK_MAX_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input);
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ control &= ~UNCORE_MAX_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- control &= ~UNCORE_GENMASK_MIN_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input);
+ control &= ~UNCORE_MIN_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
@@ -146,7 +158,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un
/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
struct tpmi_uncore_struct *uncore_root;
@@ -171,10 +183,10 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- input, min_max);
+ input, index);
}
- if (min_max)
+ if (index == UNCORE_INDEX_MAX_FREQ)
uncore_root->max_ratio = input;
else
uncore_root->min_ratio = input;
@@ -182,18 +194,20 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
return 0;
}
- if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input)
+ if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
+ uncore_root->max_ratio < input)
return -EINVAL;
- if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input)
+ if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
+ uncore_root->min_ratio > input)
return -EINVAL;
- write_control_freq(cluster_info, input, min_max);
+ write_control_freq(cluster_info, input, index);
return 0;
}
-/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */
+/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -204,11 +218,29 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
return -ENODATA;
status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
- *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
int i;
@@ -259,8 +291,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
return -EINVAL;
/* Register callbacks to uncore core */
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
if (ret)
return ret;
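
The TPMI hunks above decode and update the CONTROL register through named masks with FIELD_GET()/FIELD_PREP() rather than open-coded shifts. A minimal sketch of that decode/encode pattern follows, assuming a raw 64-bit CONTROL value has already been read from MMIO; the mask and multiplier definitions simply restate the values in the patch, and the helper names are illustrative only.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Same CONTROL field layout and kHz multiplier as defined in the patch. */
#define UNCORE_MAX_RATIO_MASK		GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK		GENMASK_ULL(21, 15)
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000

/* Decode the min/max limits, in kHz, from a raw CONTROL value. */
static void decode_control(u64 control, unsigned int *min_khz,
			   unsigned int *max_khz)
{
	*max_khz = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
	*min_khz = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Re-encode a new max ratio while preserving the other CONTROL fields. */
static u64 encode_max_ratio(u64 control, unsigned int ratio)
{
	control &= ~UNCORE_MAX_RATIO_MASK;
	control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, ratio);
	return control;
}
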
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index b89c0dda9e5d..a450b8a6bcec 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -14,6 +14,7 @@
* Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -36,8 +37,13 @@ static enum cpuhp_state uncore_hp_state __read_mostly;
#define MSR_UNCORE_PERF_STATUS 0x621
#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8)
+
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
u64 cap;
int ret;
@@ -49,20 +55,22 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (ret)
return ret;
- *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
int ret;
u64 cap;
input /= UNCORE_FREQ_KHZ_MULTIPLIER;
- if (!input || input > 0x7F)
+ if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK))
return -EINVAL;
if (data->control_cpu < 0)
@@ -72,12 +80,12 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
if (ret)
return ret;
- if (min_max) {
- cap &= ~0x7F;
- cap |= input;
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ cap &= ~UNCORE_MAX_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- cap &= ~GENMASK(14, 8);
- cap |= (input << 8);
+ cap &= ~UNCORE_MIN_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
@@ -101,11 +109,28 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
if (ret)
return ret;
- *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
@@ -197,34 +222,34 @@ static struct notifier_block uncore_pm_nb = {
};
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
@@ -248,8 +273,7 @@ static int __init intel_uncore_init(void)
if (!uncore_instances)
return -ENOMEM;
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
if (ret)
goto err_free;
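
The CPU ID table above is converted from X86_MATCH_INTEL_FAM6_MODEL() to the vendor/family/model (VFM) macros. As a hedged sketch, such a table is typically consumed at init time with x86_match_cpu(); the table name and init function below are illustrative and not taken from this patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Illustrative table reusing two of the VFM entries shown above. */
static const struct x86_cpu_id my_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
	X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
	{}
};

static int __init my_match_init(void)
{
	const struct x86_cpu_id *id;

	id = x86_match_cpu(my_cpu_ids);
	if (!id)
		return -ENODEV;

	/* Model-specific setup would continue here. */
	return 0;
}
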
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 84c1353eb12b..9b7ce03ba085 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -24,6 +24,7 @@
#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+MODULE_DESCRIPTION("Intel Virtual Button driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");