Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
8 files changed, 434 insertions, 123 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2f4422d4c8a4..41e4774abdb0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -91,10 +91,14 @@ #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "soc15_hw_ip.h" +#include "soc15_common.h" #include "vega10_ip_offset.h" #include "soc15_common.h" +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" + #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" #include "modules/inc/mod_info_packet.h" @@ -120,6 +124,11 @@ MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); +#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); +#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); + #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -1258,10 +1267,20 @@ static void vblank_control_worker(struct work_struct *work) DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); - /* Control PSR based on vblank requirements from OS */ + /* + * Control PSR based on vblank requirements from OS + * + * If panel supports PSR SU, there's no need to disable PSR when OS is + * submitting fast atomic commits (we infer this by whether the OS + * requests vblank events). Fast atomic commits will simply trigger a + * full-frame-update (FFU); a specific case of selective-update (SU) + * where the SU region is the full hactive*vactive region. See + * fill_dc_dirty_rects(). + */ if (vblank_work->stream && vblank_work->stream->link) { if (vblank_work->enable) { - if (vblank_work->stream->link->psr_settings.psr_allow_active) + if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && + vblank_work->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(vblank_work->stream); } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && !vblank_work->stream->link->psr_settings.psr_allow_active && @@ -1509,6 +1528,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_INFO("Seamless boot condition check passed\n"); } + init_data.flags.enable_mipi_converter_optimization = true; + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1803,6 +1824,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): return 0; default: break; @@ -1926,6 +1949,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_asic = DMUB_ASIC_DCN316; fw_name_dmub = FIRMWARE_DCN316_DMUB; break; + case IP_VERSION(3, 2, 0): + dmub_asic = DMUB_ASIC_DCN32; + fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; + break; + case IP_VERSION(3, 2, 1): + dmub_asic = DMUB_ASIC_DCN321; + fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; + break; default: /* ASIC doesn't support DMUB. 
*/ return 0; @@ -2172,7 +2203,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) } else { ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { - drm_dp_mst_topology_mgr_set_mst(mgr, false); + dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link); need_hotplug = true; } } @@ -2554,34 +2586,6 @@ cleanup: return; } -static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) -{ - struct dc_stream_state *stream_state; - struct amdgpu_dm_connector *aconnector = link->priv; - struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); - struct dc_stream_update stream_update; - bool dpms_off = true; - - memset(&stream_update, 0, sizeof(stream_update)); - stream_update.dpms_off = &dpms_off; - - mutex_lock(&adev->dm.dc_lock); - stream_state = dc_stream_find_from_link(link); - - if (stream_state == NULL) { - DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n"); - mutex_unlock(&adev->dm.dc_lock); - return; - } - - stream_update.stream = stream_state; - acrtc_state->force_dpms_off = true; - dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, - stream_state, &stream_update, - stream_state->ctx->dc->current_state); - mutex_unlock(&adev->dm.dc_lock); -} - static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; @@ -2814,7 +2818,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; + u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; @@ -2844,7 +2848,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| @@ -2872,8 +2876,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) * The results of the above expressions can be verified at * pre_computed_values. */ - q = max_cll >> 5; - r = max_cll % 32; + q = max_avg >> 5; + r = max_avg % 32; max = (1 << q) * pre_computed_values[r]; // min luminance: maxLum * (CV/255)^2 / 100 @@ -3032,16 +3036,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); +#ifdef CONFIG_DRM_AMD_DC_HDCP struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - struct dm_crtc_state *dm_crtc_state = NULL; +#endif if (adev->dm.disable_hpd_irq) return; - if (dm_con_state->base.state && dm_con_state->base.crtc) - dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state( - dm_con_state->base.state, - dm_con_state->base.crtc)); /* * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. 
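The update_connector_ext_caps() hunk above switches the luminance code value it decodes from max_cll to max_fall (held in max_avg) while keeping the existing math: q = CV >> 5, r = CV % 32, max = (1 << q) * pre_computed_values[r], which approximates the CTA-861.3 encoding max = 50 * 2^(CV/32); min luminance stays max * (CV/255)^2 / 100 as the code comment notes. A minimal standalone sketch of that decode, using libm instead of the driver's integer lookup table (illustrative only, not part of the patch):

#include <math.h>
#include <stdio.h>

/* max luminance: 50 * 2^(CV/32) nits; min luminance: max * (CV/255)^2 / 100,
 * matching the comment in update_connector_ext_caps(). */
static double decode_max_nits(unsigned int cv)
{
        return 50.0 * pow(2.0, cv / 32.0);
}

static double decode_min_nits(double max_nits, unsigned int cv)
{
        return max_nits * pow(cv / 255.0, 2.0) / 100.0;
}

int main(void)
{
        double max = decode_max_nits(96);       /* 50 * 2^(96/32) = 400 nits */

        printf("max %.1f nits, min %.4f nits\n", max, decode_min_nits(max, 20));
        return 0;
}

With CV = 96 the closed form gives 50 * 2^3 = 400 nits; the driver's integer path reaches the same value via q = 3, r = 0, assuming pre_computed_values[0] is the 50-nit base entry.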
@@ -3071,11 +3072,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_kms_helper_connector_hotplug_event(connector); } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { - if (new_connection_type == dc_connection_none && - aconnector->dc_link->type == dc_connection_none && - dm_crtc_state) - dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); - amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); @@ -3868,9 +3864,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { @@ -4074,7 +4067,6 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) else DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); } -#endif static int initialize_plane(struct amdgpu_display_manager *dm, struct amdgpu_mode_info *mode_info, int plane_id, @@ -4120,9 +4112,6 @@ static int initialize_plane(struct amdgpu_display_manager *dm, static void register_backlight_device(struct amdgpu_display_manager *dm, struct dc_link *link) { -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && link->type != dc_connection_none) { /* @@ -4138,7 +4127,6 @@ static void register_backlight_device(struct amdgpu_display_manager *dm, dm->num_of_edps++; } } -#endif } @@ -4235,6 +4223,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); @@ -4253,6 +4243,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): psr_feature_enabled = true; break; default: @@ -4261,9 +4253,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } - /* Disable vblank IRQs aggressively for power-saving. 
*/ - adev_to_drm(adev)->vblank_disable_immediate = true; - /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4370,6 +4359,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4556,6 +4547,8 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -4865,7 +4858,9 @@ fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); - unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits); + unsigned int pipes_log2; + + pipes_log2 = min(5u, mod_pipe_xor_bits); fill_gfx9_tiling_info_from_device(adev, tiling_info); @@ -5201,8 +5196,73 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } +static void +add_gfx11_modifiers(struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int num_pipes = 0; + int pipe_xor_bits = 0; + int num_pkrs = 0; + int pkrs = 0; + u32 gb_addr_config; + u8 i = 0; + unsigned swizzle_r_x; + uint64_t modifier_r_x; + uint64_t modifier_dcc_best; + uint64_t modifier_dcc_4k; + + /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from + * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */ + gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); + ASSERT(gb_addr_config != 0); + + num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); + pkrs = ilog2(num_pkrs); + num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); + pipe_xor_bits = ilog2(num_pipes); + + for (i = 0; i < 2; i++) { + /* Insert the best one first. */ + /* R_X swizzle modes are the best for rendering and DCC requires them. */ + if (num_pipes > 16) + swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; + else + swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; + + modifier_r_x = AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(TILE, swizzle_r_x) | + AMD_FMT_MOD_SET(PACKERS, pkrs); + + /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ + modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); + + /* DCC settings for 4K and greater resolutions. 
(required by display hw) */ + modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); + + add_modifier(mods, size, capacity, modifier_dcc_best); + add_modifier(mods, size, capacity, modifier_dcc_4k); + + add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + + add_modifier(mods, size, capacity, modifier_r_x); + } + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); +} + static int -get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; @@ -5234,6 +5294,9 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u else add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; + case AMDGPU_FAMILY_GC_11_0_0: + add_gfx11_modifiers(adev, mods, &size, &capacity); + break; } add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); @@ -5272,7 +5335,7 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, dcc->enable = 1; dcc->meta_pitch = afb->base.pitches[1]; dcc->independent_64b_blks = independent_64b_blks; - if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { if (independent_64b_blks && independent_128b_blks) dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; else if (independent_128b_blks) @@ -5640,6 +5703,117 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, return 0; } +/** + * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates + * + * @plane: DRM plane containing dirty regions that need to be flushed to the eDP + * remote fb + * @old_plane_state: Old state of @plane + * @new_plane_state: New state of @plane + * @crtc_state: New state of CRTC connected to the @plane + * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * + * For PSR SU, DC informs the DMUB uController of dirty rectangle regions + * (referred to as "damage clips" in DRM nomenclature) that require updating on + * the eDP remote buffer. The responsibility of specifying the dirty regions is + * amdgpu_dm's. + * + * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the + * plane with regions that require flushing to the eDP remote buffer. In + * addition, certain use cases - such as cursor and multi-plane overlay (MPO) - + * implicitly provide damage clips without any client support via the plane + * bounds. + * + * Today, amdgpu_dm only supports the MPO and cursor usecase. 
+ * + * TODO: Also enable for FB_DAMAGE_CLIPS + */ +static void fill_dc_dirty_rects(struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state, + struct drm_crtc_state *crtc_state, + struct dc_flip_addrs *flip_addrs) +{ + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); + struct rect *dirty_rects = flip_addrs->dirty_rects; + uint32_t num_clips; + bool bb_changed; + bool fb_changed; + uint32_t i = 0; + + flip_addrs->dirty_rect_count = 0; + + /* + * Cursor plane has it's own dirty rect update interface. See + * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data + */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return; + + /* + * Today, we only consider MPO use-case for PSR SU. If MPO not + * requested, and there is a plane update, do FFU. + */ + if (!dm_crtc_state->mpo_requested) { + dirty_rects[0].x = 0; + dirty_rects[0].y = 0; + dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay; + dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay; + flip_addrs->dirty_rect_count = 1; + DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", + new_plane_state->plane->base.id, + dm_crtc_state->base.mode.crtc_hdisplay, + dm_crtc_state->base.mode.crtc_vdisplay); + return; + } + + /* + * MPO is requested. Add entire plane bounding box to dirty rects if + * flipped to or damaged. + * + * If plane is moved or resized, also add old bounding box to dirty + * rects. + */ + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + fb_changed = old_plane_state->fb->base.id != + new_plane_state->fb->base.id; + bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || + old_plane_state->crtc_y != new_plane_state->crtc_y || + old_plane_state->crtc_w != new_plane_state->crtc_w || + old_plane_state->crtc_h != new_plane_state->crtc_h); + + DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", + new_plane_state->plane->base.id, + bb_changed, fb_changed, num_clips); + + if (num_clips || fb_changed || bb_changed) { + dirty_rects[i].x = new_plane_state->crtc_x; + dirty_rects[i].y = new_plane_state->crtc_y; + dirty_rects[i].width = new_plane_state->crtc_w; + dirty_rects[i].height = new_plane_state->crtc_h; + DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", + new_plane_state->plane->base.id, + dirty_rects[i].x, dirty_rects[i].y, + dirty_rects[i].width, dirty_rects[i].height); + i += 1; + } + + /* Add old plane bounding-box if plane is moved or resized */ + if (bb_changed) { + dirty_rects[i].x = old_plane_state->crtc_x; + dirty_rects[i].y = old_plane_state->crtc_y; + dirty_rects[i].width = old_plane_state->crtc_w; + dirty_rects[i].height = old_plane_state->crtc_h; + DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", + old_plane_state->plane->base.id, + dirty_rects[i].x, dirty_rects[i].y, + dirty_rects[i].width, dirty_rects[i].height); + i += 1; + } + + flip_addrs->dirty_rect_count = i; +} + static void update_stream_scaling_settings(const struct drm_display_mode *mode, const struct dm_connector_state *dm_state, struct dc_stream_state *stream) @@ -6587,7 +6761,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; - state->force_dpms_off = cur->force_dpms_off; + state->mpo_requested = cur->mpo_requested; /* TODO Duplicate dc_stream after objects are stream object is flattened */ 
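fill_dc_dirty_rects() above takes its damage information from plane state; as its kernel-doc notes, a damage-aware client would advertise dirty regions through the FB_DAMAGE_CLIPS plane property, which this patch does not consume yet (only MPO and cursor plane bounds are used today). For context, a hedged sketch of the client side using the standard libdrm atomic API; the property id lookup is omitted, and prop_id, stage_damage_clip() and the clip values are illustrative assumptions, not anything defined by this patch:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_mode.h>   /* struct drm_mode_rect, from the libdrm include path */

/* Stage one damage clip (x1,y1)-(x2,y2) on a plane in an atomic request.
 * prop_id is assumed to be the plane's FB_DAMAGE_CLIPS property id, found
 * beforehand via drmModeObjectGetProperties()/drmModeGetProperty(). */
static int stage_damage_clip(int fd, drmModeAtomicReq *req, uint32_t plane_id,
                             uint32_t prop_id, int32_t x1, int32_t y1,
                             int32_t x2, int32_t y2)
{
        struct drm_mode_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
        uint32_t blob_id;
        int ret;

        ret = drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
        if (ret)
                return ret;

        /* drmModeAtomicAddProperty() returns a negative errno on failure. */
        ret = drmModeAtomicAddProperty(req, plane_id, prop_id, blob_id);
        return ret < 0 ? ret : 0;
}

A kernel path consuming such clips would translate the blob's drm_mode_rect entries into flip_addrs->dirty_rects, much like the MPO path above fills them from plane bounds.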
return &state->base; @@ -6679,7 +6853,7 @@ static void dm_disable_vblank(struct drm_crtc *crtc) dm_set_vblank(crtc, false); } -/* Implemented only the options currently availible for the driver */ +/* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .reset = dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, @@ -6846,15 +7020,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) if (aconnector->mst_mgr.dev) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) for (i = 0; i < dm->num_of_edps; i++) { if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { backlight_device_unregister(dm->backlight_dev[i]); dm->backlight_dev[i] = NULL; } } -#endif if (aconnector->dc_em_sink) dc_sink_release(aconnector->dc_em_sink); @@ -7042,7 +7213,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, break; } - dc_result = dc_validate_stream(adev->dm.dc, stream); + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); + + if (dc_result == DC_OK) + dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result != DC_OK) { DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", @@ -7342,7 +7517,7 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder) } -static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) { switch (display_color_depth) { case COLOR_DEPTH_666: @@ -9224,6 +9399,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; + fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, + new_crtc_state, + &bundle->flip_addrs[planes_count]); + /* * Only allow immediate flips for fast updates that don't * change FB pitch, DCC state, rotation or mirroing. @@ -9310,8 +9489,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * and rely on sending it from software. */ if (acrtc_attach->base.state->event && - acrtc_state->active_planes > 0 && - !acrtc_state->force_dpms_off) { + acrtc_state->active_planes > 0) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); @@ -9419,6 +9597,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Allow PSR when skip count is 0. */ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; + + /* + * If sink supports PSR SU, there is no need to rely on + * a vblank event disable request to enable PSR. PSR SU + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + acrtc_attach->dm_irq_params.allow_psr_entry && + !acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_enable(acrtc_state->stream); } else { acrtc_attach->dm_irq_params.allow_psr_entry = false; } @@ -9912,15 +10102,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* Update audio instances for each connector. 
*/ amdgpu_dm_commit_audio(dev, state); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } -#endif + /* * send vblank event on all events not handled in flip and * mark consumed event for drm_atomic_helper_commit_hw_done @@ -10368,7 +10556,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ - if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) + if (!aconnector) goto skip_modeset; if (modereset_required(new_crtc_state)) @@ -10979,7 +11167,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } } - pre_validate_dsc(state, &dm_state, vars); + if (!pre_validate_dsc(state, &dm_state, vars)) { + ret = -EINVAL; + goto fail; + } } #endif for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -11225,6 +11416,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, #if defined(CONFIG_DRM_AMD_DC_DCN) if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index aa34c0068f41..73755b304299 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -242,6 +242,13 @@ struct hpd_rx_irq_offload_work { * @force_timing_sync: set via debugfs. When set, indicates that all connected * displays will be forced to synchronize. 
* @dmcub_trace_event_en: enable dmcub trace events + * @dmub_outbox_params: DMUB Outbox parameters + * @num_of_edps: number of backlight eDPs + * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the + * driver when true + * @dmub_aux_transfer_done: struct completion used to indicate when DMUB + * transfers are done + * @delayed_hpd_wq: work queue used to delay DMUB HPD work */ struct amdgpu_display_manager { @@ -583,7 +590,6 @@ struct amdgpu_dm_connector { struct drm_dp_mst_port *port; struct amdgpu_dm_connector *mst_port; struct drm_dp_aux *dsc_aux; - /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; @@ -639,8 +645,6 @@ struct dm_crtc_state { bool dsc_force_changed; bool vrr_supported; - - bool force_dpms_off; struct mod_freesync_config freesync_config; struct dc_info_packet vrr_infopacket; @@ -749,4 +753,6 @@ int dm_atomic_get_state(struct drm_atomic_state *state, struct amdgpu_dm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc); + +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth); #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 2a78ae007521..b64507f294ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -540,11 +540,11 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, /* apply phy settings from user */ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) { - link_lane_settings.lane_settings[r].VOLTAGE_SWING = + link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING = (enum dc_voltage_swing) (param[0]); - link_lane_settings.lane_settings[r].PRE_EMPHASIS = + link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS = (enum dc_pre_emphasis) (param[1]); - link_lane_settings.lane_settings[r].POST_CURSOR2 = + link_lane_settings.hw_lane_settings[r].POST_CURSOR2 = (enum dc_post_cursor2) (param[2]); } @@ -738,7 +738,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us } for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) - link_training_settings.lane_settings[i] = link->cur_lane_setting[i]; + link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i]; dc_link_set_test_pattern( link, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 15c0e3f2a9c3..d7c41c133e2c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -476,13 +476,16 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; display->stream_enc_idx = config->stream_enc_idx; link->link_enc_idx = config->link_enc_idx; + link->dio_output_id = config->dio_output_idx; link->phy_idx = config->phy_idx; + if (sink) link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); link->hdcp_supported_informational = link_is_hdcp14; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; + link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 3; 
link->adjust.hdcp1.disable = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 7c799ddc1d27..137645d40b72 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -451,7 +451,6 @@ bool dm_helpers_dp_mst_stop_top_mgr( struct dc_link *link) { struct amdgpu_dm_connector *aconnector = link->priv; - uint8_t i; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); @@ -463,22 +462,7 @@ bool dm_helpers_dp_mst_stop_top_mgr( if (aconnector->mst_mgr.mst_state == true) { drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); - - for (i = 0; i < MAX_SINKS_PER_LINK; i++) { - if (link->remote_sinks[i] == NULL) - continue; - - if (link->remote_sinks[i]->sink_signal == - SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_remove_remote_sink(link, link->remote_sinks[i]); - - if (aconnector->dc_sink) { - dc_sink_release(aconnector->dc_sink); - aconnector->dc_sink = NULL; - aconnector->dc_link->cur_link_settings.lane_count = 0; - } - } - } + link->cur_link_settings.lane_count = 0; } return false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9221b6690a4a..f3ce37664143 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -140,11 +140,28 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) { - struct amdgpu_dm_connector *amdgpu_dm_connector = + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_port *port = amdgpu_dm_connector->port; + struct drm_dp_mst_port *port = aconnector->port; + struct amdgpu_dm_connector *root = aconnector->mst_port; + struct dc_link *dc_link = aconnector->dc_link; + struct dc_sink *dc_sink = aconnector->dc_sink; drm_dp_mst_connector_early_unregister(connector, port); + + /* + * Release dc_sink for connector which its attached port is + * no longer in the mst topology + */ + drm_modeset_lock(&root->mst_mgr.base.lock, NULL); + if (dc_sink) { + if (dc_link->sink_count) + dc_link_remove_remote_sink(dc_link, dc_sink); + + dc_sink_release(dc_sink); + aconnector->dc_sink = NULL; + } + drm_modeset_unlock(&root->mst_mgr.base.lock); } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { @@ -344,12 +361,59 @@ dm_dp_mst_detect(struct drm_connector *connector, { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_dm_connector *master = aconnector->mst_port; + struct drm_dp_mst_port *port = aconnector->port; + int connection_status; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, - aconnector->port); + connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); + + if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { + uint8_t dpcd_rev; + int ret; + + ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev); + + if (ret == 1) { + port->dpcd_rev = dpcd_rev; + + /* Could be DP1.2 DP Rx case*/ + if (!dpcd_rev) { + ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev); + + if (ret == 1) + port->dpcd_rev = dpcd_rev; + } + + if (!dpcd_rev) + DRM_DEBUG_KMS("Can't decide DPCD revision 
number!"); + } + + /* + * Could be legacy sink, logical port etc on DP1.2. + * Will get Nack under these cases when issue remote + * DPCD read. + */ + if (ret != 1) + DRM_DEBUG_KMS("Can't access DPCD"); + } else if (port->pdt == DP_PEER_DEVICE_NONE) { + port->dpcd_rev = 0; + } + + /* + * Release dc_sink for connector which unplug event is notified by CSN msg + */ + if (connection_status == connector_status_disconnected && aconnector->dc_sink) { + if (aconnector->dc_link->sink_count) + dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + } + + return connection_status; } static int dm_dp_mst_atomic_check(struct drm_connector *connector, @@ -634,7 +698,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) return dsc_config.bits_per_pixel; } -static void increase_dsc_bpp(struct drm_atomic_state *state, +static bool increase_dsc_bpp(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, @@ -694,7 +758,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, pbn_per_timeslot) < 0) - return; + return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { @@ -704,7 +768,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, pbn_per_timeslot) < 0) - return; + return false; } } else { vars[next_index].pbn += initial_slack[next_index]; @@ -713,7 +777,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, pbn_per_timeslot) < 0) - return; + return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { @@ -723,16 +787,17 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, pbn_per_timeslot) < 0) - return; + return false; } } bpp_increased[next_index] = true; remaining_to_increase--; } + return true; } -static void try_disable_dsc(struct drm_atomic_state *state, +static bool try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, @@ -780,7 +845,7 @@ static void try_disable_dsc(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) - return; + return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].dsc_enabled = false; @@ -792,12 +857,13 @@ static void try_disable_dsc(struct drm_atomic_state *state, params[next_index].port, vars[next_index].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) - return; + return false; } tried[next_index] = true; remaining_to_try--; } + return true; } static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, @@ -913,9 +979,11 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count, k); + if (!increase_dsc_bpp(state, dc_link, params, vars, count, k)) + return false; - try_disable_dsc(state, dc_link, params, vars, count, k); + if (!try_disable_dsc(state, dc_link, params, vars, count, k)) + return false; set_dsc_configs_from_fairness_vars(params, vars, count, k); @@ -1187,21 +1255,22 @@ static 
bool is_dsc_precompute_needed(struct drm_atomic_state *state) return ret; } -void pre_validate_dsc(struct drm_atomic_state *state, +bool pre_validate_dsc(struct drm_atomic_state *state, struct dm_atomic_state **dm_state_ptr, struct dsc_mst_fairness_vars *vars) { int i; struct dm_atomic_state *dm_state; struct dc_state *local_dc_state = NULL; + int ret = 0; if (!is_dsc_precompute_needed(state)) { DRM_INFO_ONCE("DSC precompute is not needed.\n"); - return; + return true; } if (dm_atomic_get_state(state, dm_state_ptr)) { DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); - return; + return false; } dm_state = *dm_state_ptr; @@ -1213,7 +1282,7 @@ void pre_validate_dsc(struct drm_atomic_state *state, local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); if (!local_dc_state) - return; + return false; for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; @@ -1239,11 +1308,19 @@ void pre_validate_dsc(struct drm_atomic_state *state, &state->crtcs[ind].new_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); + if (local_dc_state->streams[i] == NULL) { + ret = -EINVAL; + break; + } } } + if (ret != 0) + goto clean_exit; + if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) { DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; goto clean_exit; } @@ -1273,5 +1350,43 @@ clean_exit: } kfree(local_dc_state); + + return (ret == 0); } + #endif + +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream) +{ + int bpp, pbn, branch_max_throughput_mps = 0; + + /* check if mode could be supported within fUll_pbn */ + bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; + pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); + if (pbn > aconnector->port->full_pbn) + return DC_FAIL_BANDWIDTH_VALIDATE; + + /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; + break; + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; + break; + default: + break; + } + + if (branch_max_throughput_mps != 0 && + ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) + return DC_FAIL_BANDWIDTH_VALIDATE; + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 85628ad59e6c..b92a7c5671aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -59,8 +59,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, bool needs_dsc_aux_workaround(struct dc_link *link); -void pre_validate_dsc(struct drm_atomic_state *state, +bool pre_validate_dsc(struct drm_atomic_state *state, struct dm_atomic_state **dm_state_ptr, struct dsc_mst_fairness_vars *vars); +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream); + #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 
141fd2721501..c8da18e45b0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -79,10 +79,12 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link) link->psr_settings.psr_feature_enabled = true; } - DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d\n", + DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", link->psr_settings.psr_feature_enabled, link->psr_settings.psr_version, - link->dpcd_caps.psr_info.psr_version); + link->dpcd_caps.psr_info.psr_version, + link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); } @@ -97,19 +99,24 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) struct dc_link *link = NULL; struct psr_config psr_config = {0}; struct psr_context psr_context = {0}; + struct dc *dc = NULL; bool ret = false; if (stream == NULL) return false; link = stream->link; + dc = link->ctx->dc; if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { - psr_config.psr_version = link->psr_settings.psr_version; - psr_config.psr_frame_capture_indication_req = 0; - psr_config.psr_rfb_setup_time = 0x37; - psr_config.psr_sdp_transmit_line_num_deadline = 0x20; - psr_config.allow_smu_optimizations = 0x0; + mod_power_calc_psr_configs(&psr_config, link, stream); + + /* linux DM specific updating for psr config fields */ + psr_config.allow_smu_optimizations = + (amdgpu_dc_feature_mask & DC_PSR_ALLOW_SMU_OPT) && + mod_power_only_edp(dc->current_state, stream); + psr_config.allow_multi_disp_optimizations = + (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); |
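Earlier in this diff, amdgpu_dm_mst_types.c gains dm_dp_mst_is_port_support_mode(), which rejects a stream whose PBN demand exceeds the port's full_pbn or whose pixel rate exceeds the branch decoder throughput reported in the DSC caps. A standalone sketch of those two checks, with the PBN computed using the same 64/54 x 1.006 margin formula drm_dp_calc_pbn_mode() applies in the non-DSC case; the helper names and the full_pbn value here are illustrative assumptions, not driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* PBN demand for a given pixel clock (kHz) and bpp, 64/54 units with a
 * 0.6% margin, rounded up. */
static uint32_t calc_pbn(uint32_t clock_khz, uint32_t bpp)
{
        uint64_t num = (uint64_t)clock_khz * bpp * 64 * 1006;
        uint64_t den = 8ULL * 54 * 1000 * 1000;

        return (uint32_t)((num + den - 1) / den);       /* DIV_ROUND_UP */
}

static bool port_supports_mode(uint32_t clock_khz, uint32_t bpc,
                               uint32_t full_pbn,
                               uint32_t branch_throughput_mps)
{
        uint32_t pbn = calc_pbn(clock_khz, bpc * 3);    /* RGB/YCbCr444: bpp = 3 * bpc */

        if (pbn > full_pbn)
                return false;

        /* branch throughput is in megapixels/s; skip the check when the sink
         * reports 0, mirroring the driver's guard. */
        if (branch_throughput_mps && clock_khz > branch_throughput_mps * 1000)
                return false;

        return true;
}

int main(void)
{
        /* 3840x2160@60 RGB 8 bpc at a 594 MHz pixel clock, against an
         * assumed 2560 PBN cap and no reported branch throughput. */
        printf("fits: %d\n", port_supports_mode(594000, 8, 2560, 0));
        return 0;
}

For that 594 MHz 4k60 timing at 8 bpc the formula yields roughly 2125 PBN, so the assumed 2560 PBN cap admits it; a port behind a slower branch link would report a smaller full_pbn and the mode would be filtered out before dc_validate_stream() runs, matching the new ordering in create_validate_stream_for_sink().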