Diffstat (limited to 'drivers/gpu/drm/amd/pm')
24 files changed, 401 insertions, 433 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 249cb0aeb5ae..41472ed99253 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -310,7 +310,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level = 0xff; + enum amd_dpm_forced_level current_level; int ret = 0; if (amdgpu_in_reset(adev)) @@ -350,6 +350,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, if (pp_funcs->get_performance_level) current_level = amdgpu_dpm_get_performance_level(adev); + else + current_level = adev->pm.dpm.forced_level; if (current_level == level) { pm_runtime_mark_last_busy(ddev->dev); @@ -2019,15 +2021,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, .attr_update = ss_power_attr_update), AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC, @@ -2087,10 +2089,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (asic_type < CHIP_VEGA12) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { + if (!adev->powerplay.pp_funcs->get_power_profile_mode || + amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 8156729c370b..3557f4e7fc30 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -1008,7 +1008,9 @@ struct pptable_funcs { /** * @set_power_limit: Set power limit in watts. 
*/ - int (*set_power_limit)(struct smu_context *smu, uint32_t n); + int (*set_power_limit)(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); /** * @init_max_sustainable_clocks: Populate max sustainable clock speed diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index cbdae8a2c698..2d422e6a9feb 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -197,7 +197,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu); int smu_v11_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index dc91eb608791..e5d3b0d1a032 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -163,7 +163,9 @@ int smu_v13_0_notify_display_change(struct smu_context *smu); int smu_v13_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v13_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h index 1d3447991d0c..fc9198846e70 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h @@ -51,7 +51,7 @@ #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type +#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used) #define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. 
enter GFXOFF #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer @@ -63,7 +63,7 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) -#define PPSMC_MSG_SPARE0 0x16 ///< Spared +#define PPSMC_MSG_SPARE 0x16 ///< Spare #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..8d796ed3b7d1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; + int ret; - if (!hwmgr || !hwmgr->pm_en || !buf) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) + return -EOPNOTSUPP; + if (!buf) return -EINVAL; - if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return snprintf(buf, PAGE_SIZE, "\n"); - } - - return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; - if (!hwmgr || !hwmgr->pm_en) - return ret; - - if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) return ret; - } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); - return ret; + return -EINVAL; } mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h index b7e2651b570b..2fc1733bcdcf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h @@ -29,9 +29,9 @@ typedef enum atom_smu9_syspll0_clock_id BIOS_CLKID; #define GetIndexIntoMasterCmdTable(FieldName) \ - (((char*)(&((struct atom_master_list_of_command_functions_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) + (offsetof(struct atom_master_list_of_command_functions_v2_1, FieldName) / sizeof(uint16_t)) #define GetIndexIntoMasterDataTable(FieldName) \ - (((char*)(&((struct atom_master_list_of_data_tables_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) + (offsetof(struct atom_master_list_of_data_tables_v2_1, FieldName) / sizeof(uint16_t)) #define PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES 32 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1de3ae77e03e..258c573acc97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1024,6 +1024,8 @@ static int 
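Aside on the ppatomfwctrl.h hunk above: the index macros switch from the classic null-pointer-cast offset trick to offsetof(). A minimal, self-contained sketch of the equivalence follows; the struct and macro names here are illustrative only, not from the patch.

#include <stddef.h>
#include <stdint.h>

/* Both forms compute the index of a uint16_t member inside a table of
 * uint16_t entries; offsetof() does it without the null-pointer
 * arithmetic of the removed macro, which is formally undefined. */
struct demo_table {
	uint16_t first;
	uint16_t second;
	uint16_t third;
};

#define DEMO_INDEX(FieldName) \
	(offsetof(struct demo_table, FieldName) / sizeof(uint16_t))

/* DEMO_INDEX(third) evaluates to 2, exactly like the old
 * (((char *)(&((struct demo_table *)0)->third) - (char *)0) / sizeof(uint16_t)) form. */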
smu10_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t min_freq, max_freq = 0; uint32_t ret = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); @@ -1065,7 +1067,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -1081,7 +1083,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } @@ -1456,6 +1458,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e7803ce8f67a..aceebf584225 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4914,6 +4914,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, int size = 0; uint32_t i, now, clock, pcie_speed; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); @@ -4963,7 +4965,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, @@ -4972,7 +4974,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, @@ -4981,7 +4983,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); @@ -5518,6 +5520,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b94a77e4e714..8e28a8eecefc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1550,6 +1550,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, 
uint32_t i, now; int size = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h index ad33983a8064..2a75da1e9f03 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h @@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. + */ +static inline void phm_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c152a61ddd2c..c981fc2882f0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -4637,6 +4639,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0, count = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: if (data->registry_data.sclk_dpm_key_disabled) @@ -4717,7 +4721,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", @@ -4727,7 +4731,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", @@ -4737,7 +4741,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); @@ -5112,6 +5116,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 8558718e15a8..f7e783e1c888 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2141,6 +2141,8 @@ 
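Aside on the smu_helper.h hunk above: phm_get_sysfs_buf() is added because sysfs_emit_at() only accepts a page-aligned buffer, while the pm sysfs layer can hand the hwmgr print handlers a pointer that already sits at an offset into that page (for example when several clock tables are emitted into one buffer). A minimal sketch of the intended call pattern, as a fragment in hwmgr context; the function name and the placeholder frequency are hypothetical, phm_get_sysfs_buf() and sysfs_emit_at() are the real helpers.

static int example_print_levels(struct pp_hwmgr *hwmgr, char *buf)
{
	uint32_t freq = 1000;	/* placeholder MHz value for illustration */
	int size = 0;

	/* Rewind buf to the start of its page and remember how far into
	 * the page the caller's pointer was; every subsequent write then
	 * goes through sysfs_emit_at(buf, size, ...). */
	phm_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
	size += sysfs_emit_at(buf, size, "0: %10uMhz\n", freq);

	return size;
}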
static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -2244,6 +2246,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0; struct pp_clock_levels_with_latency clocks; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: PP_ASSERT_WITH_CODE( diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0cf39c1244b1..03e63be4ee27 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -3364,6 +3366,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, int ret = 0; uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); @@ -3479,7 +3483,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", od_table->GfxclkFmin); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -3489,7 +3493,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3503,7 +3507,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE"); size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); @@ -3518,7 +3522,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { @@ -4003,6 +4007,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9], title[10]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 04863a797115..01168b8955bf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ 
-455,7 +455,11 @@ static int smu_get_power_num_states(void *handle, bool is_support_sw_smu(struct amdgpu_device *adev) { - if (adev->asic_type >= CHIP_ARCTURUS) + /* vega20 is 11.0.2, but it's supported via the powerplay code */ + if (adev->asic_type == CHIP_VEGA20) + return false; + + if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) return true; return false; @@ -575,41 +579,43 @@ static int smu_set_funcs(struct amdgpu_device *adev) if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): navi10_set_ppt_funcs(smu); break; - case CHIP_ARCTURUS: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - arcturus_set_ppt_funcs(smu); - /* OD is not supported on Arcturus */ - smu->od_enabled =false; - break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): sienna_cichlid_set_ppt_funcs(smu); break; - case CHIP_ALDEBARAN: - aldebaran_set_ppt_funcs(smu); - /* Enable pp_od_clk_voltage node */ - smu->od_enabled = true; - break; - case CHIP_RENOIR: + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): renoir_set_ppt_funcs(smu); break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): vangogh_set_ppt_funcs(smu); break; - case CHIP_YELLOW_CARP: + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): yellow_carp_set_ppt_funcs(smu); break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(11, 0, 8): cyan_skillfish_set_ppt_funcs(smu); break; + case IP_VERSION(11, 0, 2): + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + arcturus_set_ppt_funcs(smu); + /* OD is not supported on Arcturus */ + smu->od_enabled =false; + break; + case IP_VERSION(13, 0, 2): + aldebaran_set_ppt_funcs(smu); + /* Enable pp_od_clk_voltage node */ + smu->od_enabled = true; + break; default: return -EINVAL; } @@ -694,7 +700,8 @@ static int smu_late_init(void *handle) return ret; } - if (adev->asic_type == CHIP_YELLOW_CARP) + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { @@ -1140,9 +1147,16 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); /* this is needed specifically */ - if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): ret = smu_system_features_control(smu, true); + break; + default: + break; + } return ret; } @@ -1284,7 +1298,7 @@ static int smu_start_smc_engine(struct smu_context *smu) int ret = 0; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - if (adev->asic_type < CHIP_NAVI10) { + if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { if (smu->ppt_funcs->load_microcode) { ret = smu->ppt_funcs->load_microcode(smu); if (ret) @@ -1402,23 +1416,41 @@ static int smu_disable_dpms(struct smu_context *smu) * - SMU firmware can handle the DPM reenablement * properly. 
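Aside on the amdgpu_smu.c hunks above: most of the churn is mechanical, replacing open-coded asic_type comparisons with lookups of the discovered MP1 IP version, since with IP discovery the CHIP_* enum ordering no longer says anything about the firmware interface. A condensed sketch of the pattern, as a fragment in smu context; the chip names in the comments follow the firmware-name mapping used later in smu_v11_0_init_microcode().

/* old style, relies on enum ordering:
 *	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
 *	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
 * new style, enumerates the MP1 versions explicitly: */
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(11, 0, 7):	/* sienna_cichlid */
case IP_VERSION(11, 0, 11):	/* navy_flounder */
case IP_VERSION(11, 5, 0):	/* vangogh */
case IP_VERSION(11, 0, 12):	/* dimgrey_cavefish */
	ret = smu_system_features_control(smu, true);
	break;
default:
	break;
}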
*/ - if (smu->uploading_custom_pp_table && - (adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_BEIGE_GOBY)) - return smu_disable_all_features_with_exception(smu, - true, - SMU_FEATURE_COUNT); + if (smu->uploading_custom_pp_table) { + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + return smu_disable_all_features_with_exception(smu, + true, + SMU_FEATURE_COUNT); + default: + break; + } + } /* * For Sienna_Cichlid, PMFW will handle the features disablement properly * on BACO in. Driver involvement is unnecessary. */ - if (((adev->asic_type == CHIP_SIENNA_CICHLID) || - ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) && - use_baco) - return smu_disable_all_features_with_exception(smu, - true, - SMU_FEATURE_BACO_BIT); + if (use_baco) { + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + return smu_disable_all_features_with_exception(smu, + true, + SMU_FEATURE_BACO_BIT); + default: + break; + } + } /* * For gpu reset, runpm and hibernation through BACO, @@ -1436,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->asic_type >= CHIP_NAVI10 && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2229,6 +2261,7 @@ int smu_get_power_limit(void *handle, enum pp_power_type pp_power_type) { struct smu_context *smu = handle; + struct amdgpu_device *adev = smu->adev; enum smu_ppt_limit_level limit_level; uint32_t limit_type; int ret = 0; @@ -2272,15 +2305,20 @@ int smu_get_power_limit(void *handle, } else { switch (limit_level) { case SMU_PPT_LIMIT_CURRENT: - if ((smu->adev->asic_type == CHIP_ALDEBARAN) || - (smu->adev->asic_type == CHIP_SIENNA_CICHLID) || - (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) || - (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) || - (smu->adev->asic_type == CHIP_BEIGE_GOBY)) + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 2): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): ret = smu_get_asic_power_limits(smu, &smu->current_power_limit, NULL, NULL); + break; + default: + break; + } *limit = smu->current_power_limit; break; case SMU_PPT_LIMIT_DEFAULT: @@ -2310,9 +2348,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit) mutex_lock(&smu->mutex); + limit &= (1<<24)-1; if (limit_type != SMU_DEFAULT_PPT_LIMIT) if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); goto out; } @@ -2328,7 +2367,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit) limit = smu->current_power_limit; if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.power_limit = limit; } @@ -2495,13 +2534,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf) struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if 
(!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->get_power_profile_mode) return -EOPNOTSUPP; + if (!buf) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_power_profile_mode) - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); mutex_unlock(&smu->mutex); @@ -2515,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; mutex_lock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 082f01893f3d..fd1d30a93db5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -436,6 +436,19 @@ static void arcturus_check_bxco_support(struct smu_context *smu) } } +static void arcturus_check_fan_support(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + /* No sort of fan control possible if PPTable has it disabled */ + smu->adev->pm.no_fan = + !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK); + if (smu->adev->pm.no_fan) + dev_info_once(smu->adev->dev, + "PMFW based fan control disabled"); +} + static int arcturus_check_powerplay_table(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -443,6 +456,7 @@ static int arcturus_check_powerplay_table(struct smu_context *smu) table_context->power_play_table; arcturus_check_bxco_support(smu); + arcturus_check_fan_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 3d4c65bc29dc..cbc3f99e8573 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -47,7 +47,6 @@ /* unit: MHz */ #define CYAN_SKILLFISH_SCLK_MIN 1000 #define CYAN_SKILLFISH_SCLK_MAX 2000 -#define CYAN_SKILLFISH_SCLK_DEFAULT 1800 /* unit: mV */ #define CYAN_SKILLFISH_VDDC_MIN 700 @@ -59,6 +58,8 @@ static struct gfx_user_settings { uint32_t vddc; } cyan_skillfish_user_settings; +static uint32_t cyan_skillfish_sclk_default; + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ @@ -365,13 +366,19 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) return false; ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); - if (ret) return false; feature_enabled = (uint64_t)feature_mask[0] | ((uint64_t)feature_mask[1] << 32); + /* + * cyan_skillfish specific, query default sclk inseted of hard code. + */ + if (!cyan_skillfish_sclk_default) + cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, + &cyan_skillfish_sclk_default); + return !!(feature_enabled & SMC_DPM_FEATURE); } @@ -444,14 +451,14 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - if (input[1] <= CYAN_SKILLFISH_SCLK_MIN || + if (input[1] < CYAN_SKILLFISH_SCLK_MIN || input[1] > CYAN_SKILLFISH_SCLK_MAX) { dev_err(smu->adev->dev, "Invalid sclk! 
Valid sclk range: %uMHz - %uMhz\n", CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); return -EINVAL; } - if (input[2] <= CYAN_SKILLFISH_VDDC_MIN || + if (input[2] < CYAN_SKILLFISH_VDDC_MIN || input[2] > CYAN_SKILLFISH_VDDC_MAX) { dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); @@ -468,7 +475,7 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT; + cyan_skillfish_user_settings.sclk = cyan_skillfish_sclk_default; cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index b1ad451af06b..71161f6b78fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -86,21 +86,21 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), - MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0), MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), @@ -345,7 +345,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, /* DPM UCLK enablement should be skipped for navi10 A0 secure board */ if (!(is_asic_secure(smu) && - (adev->asic_type == CHIP_NAVI10) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (adev->rev_id == 0)) && (adev->pm.pp_feature & PP_MCLK_DPM_MASK)) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) @@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */ if (is_asic_secure(smu) && - (adev->asic_type == CHIP_NAVI10) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (adev->rev_id == 0)) *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); @@ -925,18 +925,18 @@ static int 
navi1x_get_smu_metrics_data(struct smu_context *smu, return ret; } - switch (adev->asic_type) { - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 9): if (smu_version > 0x00341C00) ret = navi12_get_smu_metrics_data(smu, member, value); else ret = navi12_get_legacy_smu_metrics_data(smu, member, value); break; - case CHIP_NAVI10: - case CHIP_NAVI14: + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): default: - if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) || - ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_smu_metrics_data(smu, member, value); else ret = navi10_get_legacy_smu_metrics_data(smu, member, value); @@ -1509,8 +1509,8 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) uint32_t sclk_freq; pstate_table->gfxclk_pstate.min = gfx_table->min; - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): switch (adev->pdev->revision) { case 0xf0: /* XTX */ case 0xc0: @@ -1525,7 +1525,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) break; } break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): switch (adev->pdev->revision) { case 0xc7: /* XT */ case 0xf4: @@ -1548,7 +1548,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) break; } break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK; break; default: @@ -2562,8 +2562,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu) if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) return false; - if (adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) return true; return false; @@ -2671,8 +2671,8 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu) * - PPSMC_MSG_SetDriverDummyTableDramAddrLow * - PPSMC_MSG_GetUMCFWWA */ - if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) || - ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) { + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GET_UMC_FW_WA, 0, @@ -2691,13 +2691,13 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu) return 0; if (umc_fw_disable_cdr) { - if (adev->asic_type == CHIP_NAVI10) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) return navi10_umc_hybrid_cdr_workaround(smu); } else { return navi10_set_dummy_pstates_table_location(smu); } } else { - if (adev->asic_type == CHIP_NAVI10) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) return navi10_umc_hybrid_cdr_workaround(smu); } @@ -3151,18 +3151,18 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, return ret; } - switch (adev->asic_type) { - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 9): if (smu_version > 0x00341C00) ret = navi12_get_gpu_metrics(smu, table); else ret = navi12_get_legacy_gpu_metrics(smu, table); break; - case CHIP_NAVI10: - case CHIP_NAVI14: + case IP_VERSION(11, 0, 0): + case 
IP_VERSION(11, 0, 5): default: - if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) || - ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_gpu_metrics(smu, table); else ret =navi10_get_legacy_gpu_metrics(smu, table); @@ -3180,7 +3180,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) uint32_t param = 0; /* Navi12 does not support this */ - if (adev->asic_type == CHIP_NAVI12) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) return 0; /* diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index ca57221e3962..a4108025fe29 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -74,7 +74,7 @@ #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 #define GET_PPTABLE_MEMBER(field, member) do {\ - if (smu->adev->asic_type == CHIP_BEIGE_GOBY)\ + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\ else\ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ @@ -82,7 +82,7 @@ static int get_table_size(struct smu_context *smu) { - if (smu->adev->asic_type == CHIP_BEIGE_GOBY) + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) return sizeof(PPTable_beige_goby_t); else return sizeof(PPTable_t); @@ -298,7 +298,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, } if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) && - (adev->asic_type > CHIP_SIENNA_CICHLID) && + (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) && !(adev->flags & AMD_IS_APU)) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT); @@ -496,7 +496,7 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s uint32_t throttler_status = 0; int i; - if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= @@ -517,7 +517,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); SmuMetrics_V2_t *metrics_v2 = &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V2); - bool use_metrics_v2 = ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && + bool use_metrics_v2 = ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) ? 
true : false; uint16_t average_gfx_activity; int ret = 0; @@ -670,7 +670,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_dpm_table *dpm_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; DpmDescriptor_t *table_member; /* socclk dpm table setup */ @@ -746,78 +746,45 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* vclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.vclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } + /* vclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; - /* vclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.vclk1_table; + dpm_table = &dpm_context->dpm_tables.vclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK1, + i ? SMU_VCLK1 : SMU_VCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_1].SnapToDiscrete; + !table_member[i ? PPCLK_VCLK_1 : PPCLK_VCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; } } - /* dclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.dclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } - - /* dclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.dclk1_table; + /* dclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + dpm_table = &dpm_context->dpm_tables.dclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK1, + i ? SMU_DCLK1 : SMU_DCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_1].SnapToDiscrete; + !table_member[i ? 
PPCLK_DCLK_1 : PPCLK_DCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; @@ -902,32 +869,18 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; - if (enable) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + 0x10000 * i, NULL); if (ret) return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, - 0x10000, NULL); - if (ret) - return ret; - } - } - } else { - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); - if (ret) - return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, - 0x10000, NULL); - if (ret) - return ret; - } } } @@ -1170,7 +1123,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, * and onwards SMU firmwares. */ smu_cmn_get_smc_version(smu, NULL, &smu_version); - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900)) break; @@ -1937,7 +1890,7 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu, od_table->UclkFmax); smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (!((adev->asic_type == CHIP_SIENNA_CICHLID) && + if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900))) dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset); } @@ -2161,7 +2114,7 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, * and onwards SMU firmwares. */ smu_cmn_get_smc_version(smu, NULL, &smu_version); - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900)) { dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported " "only by 58.41.0 and onwards SMU firmwares!\n"); @@ -2865,7 +2818,7 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; int i; - if (smu->adev->asic_type == CHIP_BEIGE_GOBY) { + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) { beige_goby_dump_pptable(smu); return; } @@ -3625,7 +3578,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, SmuMetrics_V2_t *metrics_v2 = &(metrics_external.SmuMetrics_V2); struct amdgpu_device *adev = smu->adev; - bool use_metrics_v2 = ((adev->asic_type == CHIP_SIENNA_CICHLID) && + bool use_metrics_v2 = ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) ? 
true : false; uint16_t average_gfx_activity; int ret = 0; @@ -3706,8 +3659,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed; - if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu->smc_fw_version > 0x003A1E00) || - ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu->smc_fw_version > 0x00410400)) { + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) { gpu_metrics->pcie_link_width = use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth; gpu_metrics->pcie_link_speed = link_speed[use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate]; } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 87b055466a33..28b7c0562b99 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -90,37 +90,38 @@ int smu_v11_0_init_microcode(struct smu_context *smu) struct amdgpu_firmware_info *ucode = NULL; if (amdgpu_sriov_vf(adev) && - ((adev->asic_type == CHIP_NAVI12) || - (adev->asic_type == CHIP_SIENNA_CICHLID))) + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) return 0; - switch (adev->asic_type) { - case CHIP_ARCTURUS: - chip_name = "arcturus"; - break; - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): chip_name = "navi12"; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): chip_name = "sienna_cichlid"; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(11, 0, 11): chip_name = "navy_flounder"; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(11, 0, 12): chip_name = "dimgrey_cavefish"; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 13): chip_name = "beige_goby"; break; + case IP_VERSION(11, 0, 2): + chip_name = "arcturus"; + break; default: - dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + dev_err(adev->dev, "Unsupported IP version 0x%x\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } @@ -238,39 +239,40 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) if (smu->is_apu) adev->pm.fw_version = smu_version; - switch (smu->adev->asic_type) { - case CHIP_ARCTURUS: - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; - break; - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(11, 0, 11): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(11, 0, 12): smu->smc_driver_if_version = 
SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 13): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(11, 0, 8): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish; break; + case IP_VERSION(11, 0, 2): + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; + break; default: - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); + dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", + adev->ip_versions[MP1_HWIP][0]); smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; break; } @@ -492,8 +494,9 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu) int smu_v11_0_init_power(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; struct smu_power_context *smu_power = &smu->smu_power; - size_t size = smu->adev->asic_type == CHIP_VANGOGH ? + size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ? sizeof(struct smu_11_5_power_context) : sizeof(struct smu_11_0_power_context); @@ -750,8 +753,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) /* Navy_Flounder/Dimgrey_Cavefish do not support to change * display num currently */ - if (adev->asic_type >= CHIP_NAVY_FLOUNDER && - adev->asic_type <= CHIP_BEIGE_GOBY) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) return 0; return smu_cmn_send_smc_msg_with_param(smu, @@ -974,10 +979,16 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu, return ret; } -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { int power_src; int ret = 0; + uint32_t limit_param; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); @@ -997,16 +1008,16 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) * BIT 16-23: PowerSource * BIT 0-15: PowerLimit */ - n &= 0xFFFF; - n |= 0 << 24; - n |= (power_src) << 16; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + limit_param = (limit & 0xFFFF); + limit_param |= 0 << 24; + limit_param |= (power_src) << 16; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL); if (ret) { dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret; } - smu->current_power_limit = n; + smu->current_power_limit = limit; return 0; } @@ -1136,15 +1147,15 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0; struct amdgpu_device *adev = smu->adev; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -1630,11 +1641,11 
@@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) mutex_lock(&smu_baco->mutex); if (state == SMU_BACO_STATE_ENTER) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, @@ -1649,7 +1660,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) default: if (!ras || !adev->ras_enabled || adev->gmc.xgmi.pending_reset) { - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data); @@ -1931,7 +1942,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, * Separate MCLK and SOCCLK soft min/max settings are not allowed * on Arcturus. */ - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { mclk_min = mclk_max = 0; socclk_min = socclk_max = 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index f6ef0ce6e9e2..421f38e8dada 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1386,52 +1386,38 @@ static int vangogh_set_performance_level(struct smu_context *smu, uint32_t soc_mask, mclk_mask, fclk_mask; uint32_t vclk_mask = 0, dclk_mask = 0; + smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; + smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; + smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; ret = vangogh_force_dpm_limit_value(smu, true); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_LOW: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; ret = vangogh_force_dpm_limit_value(smu, false); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - ret = vangogh_unforce_dpm_levels(smu); - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetHardMinGfxClk, - VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); - if (ret) - return ret; - - ret = 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index f6ef0ce6e9e2..421f38e8dada 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1386,52 +1386,38 @@ static int vangogh_set_performance_level(struct smu_context *smu,
 	uint32_t soc_mask, mclk_mask, fclk_mask;
 	uint32_t vclk_mask = 0, dclk_mask = 0;
 
+	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
-		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
 		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
 
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
 		ret = vangogh_force_dpm_limit_value(smu, true);
+		if (ret)
+			return ret;
 		break;
 	case AMD_DPM_FORCED_LEVEL_LOW:
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
-		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
 
 		ret = vangogh_force_dpm_limit_value(smu, false);
+		if (ret)
+			return ret;
 		break;
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
 		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
 
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
 		ret = vangogh_unforce_dpm_levels(smu);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
-		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-					SMU_MSG_SetHardMinGfxClk,
-					VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
-		if (ret)
-			return ret;
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-					SMU_MSG_SetSoftMaxGfxClk,
-					VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
 		if (ret)
 			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
+		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
 
 		ret = vangogh_get_profiling_clk_mask(smu, level,
 						     &vclk_mask,
@@ -1446,32 +1432,15 @@ static int vangogh_set_performance_level(struct smu_context *smu,
 		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
 		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
 		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
-
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
-		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
-						      VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
-		if (ret)
-			return ret;
-
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
-						      VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
-		if (ret)
-			return ret;
+		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
 		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
 
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
 		ret = vangogh_get_profiling_clk_mask(smu, level,
 						     NULL,
 						     NULL,
@@ -1484,29 +1453,29 @@ static int vangogh_set_performance_level(struct smu_context *smu,
 		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
-		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
-		smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
-		smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
-						      VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
-		if (ret)
-			return ret;
+		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
+		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
 
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
-						      VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+		ret = vangogh_set_peak_clock_by_device(smu);
 		if (ret)
 			return ret;
-
-		ret = vangogh_set_peak_clock_by_device(smu);
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
-		break;
+		return 0;
 	}
+
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+					      smu->gfx_actual_hard_min_freq, NULL);
+	if (ret)
+		return ret;
+
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+					      smu->gfx_actual_soft_max_freq, NULL);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 
@@ -2144,11 +2113,12 @@ static int vangogh_get_ppt_limit(struct smu_context *smu,
 	return 0;
 }
 
-static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+static int vangogh_set_power_limit(struct smu_context *smu,
+				   enum smu_ppt_limit_type limit_type,
+				   uint32_t ppt_limit)
 {
 	struct smu_11_5_power_context *power_context =
-		smu->smu_power.power_context;
-	uint32_t limit_type = ppt_limit >> 24;
+							smu->smu_power.power_context;
 	int ret = 0;
 
 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
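
Reading aid for the vangogh_set_performance_level() rework above: each case now only records the gfx clamp targets, and the SetHardMinGfxClk/SetSoftMaxGfxClk pair is sent once after the switch. The sketch below is a simplified stand-in under that assumption; struct perf_ctx, send_msg() and the message IDs are invented names, not driver code.

#include <stdint.h>

enum { MSG_SET_HARD_MIN_GFXCLK, MSG_SET_SOFT_MAX_GFXCLK };	/* stand-ins */

struct perf_ctx {
	uint32_t hard_min;	/* plays the role of gfx_actual_hard_min_freq */
	uint32_t soft_max;	/* plays the role of gfx_actual_soft_max_freq */
};

static int send_msg(int msg, uint32_t arg)
{
	(void)msg; (void)arg;	/* stand-in for smu_cmn_send_smc_msg_with_param() */
	return 0;
}

static int set_level(struct perf_ctx *ctx, int peak,
		     uint32_t default_clk, uint32_t peak_clk)
{
	int ret;

	/* the switch only picks the clamp targets ... */
	if (peak)
		ctx->hard_min = ctx->soft_max = peak_clk;
	else
		ctx->hard_min = ctx->soft_max = default_clk;

	/* ... and the two messages are sent exactly once afterwards */
	ret = send_msg(MSG_SET_HARD_MIN_GFXCLK, ctx->hard_min);
	if (ret)
		return ret;
	return send_msg(MSG_SET_SOFT_MAX_GFXCLK, ctx->soft_max);
}
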
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 5019903db492..59a7d276541d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1241,11 +1241,13 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
 	return 0;
 }
 
-static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n)
+static int aldebaran_set_power_limit(struct smu_context *smu,
+				     enum smu_ppt_limit_type limit_type,
+				     uint32_t limit)
 {
 	/* Power limit can be set only through primary die */
 	if (aldebaran_is_primary(smu))
-		return smu_v13_0_set_power_limit(smu, n);
+		return smu_v13_0_set_power_limit(smu, limit_type, limit);
 
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a0e50f23b1dd..35145db6eedf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -89,12 +89,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	switch (adev->asic_type) {
-	case CHIP_ALDEBARAN:
+	switch (adev->ip_versions[MP1_HWIP][0]) {
+	case IP_VERSION(13, 0, 2):
 		chip_name = "aldebaran";
 		break;
 	default:
-		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+		dev_err(adev->dev, "Unsupported IP version 0x%x\n",
+			adev->ip_versions[MP1_HWIP][0]);
 		return -EINVAL;
 	}
 
@@ -210,15 +211,17 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 	smu_minor = (smu_version >> 8) & 0xff;
 	smu_debug = (smu_version >> 0) & 0xff;
 
-	switch (smu->adev->asic_type) {
-	case CHIP_ALDEBARAN:
+	switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+	case IP_VERSION(13, 0, 2):
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
 		break;
-	case CHIP_YELLOW_CARP:
+	case IP_VERSION(13, 0, 1):
+	case IP_VERSION(13, 0, 3):
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
 		break;
 	default:
-		dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
+		dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
+			smu->adev->ip_versions[MP1_HWIP][0]);
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
 		break;
 	}
@@ -740,8 +743,9 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
 	int ret = 0;
 	struct amdgpu_device *adev = smu->adev;
 
-	switch (adev->asic_type) {
-	case CHIP_YELLOW_CARP:
+	switch (adev->ip_versions[MP1_HWIP][0]) {
+	case IP_VERSION(13, 0, 1):
+	case IP_VERSION(13, 0, 3):
 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 			return 0;
 		if (enable)
@@ -941,22 +945,27 @@ int smu_v13_0_get_current_power_limit(struct smu_context *smu,
 	return ret;
 }
 
-int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v13_0_set_power_limit(struct smu_context *smu,
+			      enum smu_ppt_limit_type limit_type,
+			      uint32_t limit)
 {
 	int ret = 0;
 
+	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+		return -EINVAL;
+
 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
 		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
 		return -EOPNOTSUPP;
 	}
 
-	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
 	if (ret) {
 		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
 		return ret;
 	}
 
-	smu->current_power_limit = n;
+	smu->current_power_limit = limit;
 
 	return 0;
 }
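
Reading aid for the aldebaran and smu_v13_0 hunks above: the ASIC callback forwards the (limit_type, limit) pair unchanged, but only from the primary die, and the generic helper accepts only the default PPT limit. Everything below is a simplified, self-contained stand-in; none of the names are driver symbols.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

enum ppt_limit_type { DEFAULT_PPT_LIMIT, FAST_PPT_LIMIT };	/* stand-in enum */

static int generic_set_power_limit(enum ppt_limit_type type, uint32_t limit)
{
	if (type != DEFAULT_PPT_LIMIT)	/* mirrors the SMU_DEFAULT_PPT_LIMIT check */
		return -EINVAL;
	/* the real helper sends SMU_MSG_SetPptLimit and caches the value here */
	(void)limit;
	return 0;
}

static int asic_set_power_limit(bool is_primary_die,
				enum ppt_limit_type type, uint32_t limit)
{
	/* power limit can be set only through the primary die */
	if (is_primary_die)
		return generic_set_power_limit(type, limit);

	return -EINVAL;
}
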
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index a403657151ba..8215bbf5ed7c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                 1),
 	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                   1),
 	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,                1),
-	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,          1),
 	MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,          1),
 	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,        1),
 	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,         1),
@@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP_VALID(CUSTOM_DPM),
 	TAB_MAP_VALID(DPMCLOCKS),
 };
-
-static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
-};
 
 static int yellow_carp_init_smc_tables(struct smu_context *smu)
 {
@@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu,
 	return 0;
 }
 
-static int yellow_carp_get_power_profile_mode(struct smu_context *smu,
-					      char *buf)
-{
-	static const char *profile_name[] = {
-					"BOOTUP_DEFAULT",
-					"3D_FULL_SCREEN",
-					"POWER_SAVING",
-					"VIDEO",
-					"VR",
-					"COMPUTE",
-					"CUSTOM"};
-	uint32_t i, size = 0;
-	int16_t workload_type = 0;
-
-	if (!buf)
-		return -EINVAL;
-
-	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
-		/*
-		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
-		 * Not all profile modes are supported on yellow carp.
-		 */
-		workload_type = smu_cmn_to_asic_specific_index(smu,
-							       CMN2ASIC_MAPPING_WORKLOAD,
-							       i);
-
-		if (workload_type < 0)
-			continue;
-
-		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
-				      i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
-	}
-
-	return size;
-}
-
-static int yellow_carp_set_power_profile_mode(struct smu_context *smu,
-					      long *input, uint32_t size)
-{
-	int workload_type, ret;
-	uint32_t profile_mode = input[size];
-
-	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
-		return -EINVAL;
-	}
-
-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
-			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
-		return 0;
-
-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       profile_mode);
-	if (workload_type < 0) {
-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n",
-			profile_mode);
-		return -EINVAL;
-	}
-
-	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-					      1 << workload_type,
-					      NULL);
-	if (ret) {
-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
-			     workload_type);
-		return ret;
-	}
-
-	smu->power_profile_mode = profile_mode;
-
-	return 0;
-}
-
 static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
 					   void **table)
 {
@@ -1238,8 +1154,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
 	.read_sensor = yellow_carp_read_sensor,
 	.is_dpm_running = yellow_carp_is_dpm_running,
 	.set_watermarks_table = yellow_carp_set_watermarks_table,
-	.get_power_profile_mode = yellow_carp_get_power_profile_mode,
-	.set_power_profile_mode = yellow_carp_set_power_profile_mode,
 	.get_gpu_metrics = yellow_carp_get_gpu_metrics,
 	.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
@@ -1261,6 +1175,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
 	smu->message_map = yellow_carp_message_map;
 	smu->feature_map = yellow_carp_feature_mask_map;
 	smu->table_map = yellow_carp_table_map;
-	smu->workload_map = yellow_carp_workload_map;
 	smu->is_apu = true;
 }