Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/TODO | 110
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 327
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 72
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 107
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 82
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 83
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 1199
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 237
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 242
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 279
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 293
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 160
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 83
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 207
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 269
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 638
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 202
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 88
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/Makefile | 77
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c) | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c) | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c) | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h) | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c) | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c) | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c) | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 112
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h) | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 113
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 141
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 278
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 249
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 160
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 204
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c | 182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c) | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/audio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 256
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c | 409
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 372
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 165
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 127
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 2163
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 370
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/include/audio_types.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_id.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/signal_types.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_stats.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 2
349 files changed, 10574 insertions, 3330 deletions
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 92a5c5efcf92..9a5bcafbf730 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
deleted file mode 100644
index a8a6c106e8c7..000000000000
--- a/drivers/gpu/drm/amd/display/TODO
+++ /dev/null
@@ -1,110 +0,0 @@
-===============================================================================
-TODOs
-===============================================================================
-
-1. Base this on drm-next - WIP
-
-
-2. Cleanup commit history
-
-
-3. WIP - Drop page flip helper and use DRM's version
-
-
-4. DONE - Flatten all DC objects
- * dc_stream/core_stream/stream should just be dc_stream
- * Same for other DC objects
-
- "Is there any major reason to keep all those abstractions?
-
- Could you collapse everything into struct dc_stream?
-
- I haven't looked recently but I didn't get the impression there was a
- lot of design around what was public/protected, more whatever needed
- to be used by someone else was in public."
- ~ Dave Airlie
-
-
-5. DONE - Rename DC objects to align more with DRM
- * dc_surface -> dc_plane_state
- * dc_stream -> dc_stream_state
-
-
-6. DONE - Per-plane and per-stream validation
-
-
-7. WIP - Per-plane and per-stream commit
-
-
-8. WIP - Split pipe_ctx into plane and stream resource structs
-
-
-9. Attach plane and stream resources to state object instead of validate_context
-
-
-10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
- * Use drm_display_info instead
- * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
-
- "Making sure you use the sink-specific helper libraries and kernel
- subsystems, since there's really no good reason to have 2nd
- implementation of those in the kernel. Looks likes that's done for mst
- and edid parsing. There's still a bit a midlayer feeling to the edid
- parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
- think it'd be much better if you convert that over to reading stuff
- from drm_display_info and if needed, push stuff into the core). Also,
- I can't come up with a good reason why DC needs all this (except to
- reimplement half of our edid quirk table, which really isn't a good
- idea). Might be good if you put this onto the list of things to fix
- long-term, but imo not a blocker. Definitely make sure new stuff
- doesn't slip in (i.e. if you start adding edid quirks to DC instead of
- the drm core, refactoring to use the core edid stuff was pointless)."
- ~ Daniel Vetter
-
-
-11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
-overly complicated HW programming function for sending and receiving i2c/aux
-commands. We can greatly simplify that and move it into dc/dceXYZ like other
-HW blocks.
-
-12. drm_modeset_lock in MST should no longer be needed in recent kernels
- * Adopt appropriate locking scheme
-
-13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
-a few indirections, and consider removing entirely and using the
-drm_atomic_helper_best_encoder default behaviour.
-
-14. core/dc_debug.c, consider switching to the atomic state debug helpers and
-moving all your driver state printing into the various atomic_print_state
-callbacks. There's also plans to expose this stuff in a standard way across all
-drivers, to make debugging userspace compositors easier across different hw.
-
-15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
-dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
-
-16. Move to core SCDC helpers (I think those are new since initial DC review).
-
-17. There's still a pretty massive layer cake around dp aux and DPCD handling,
-with like 3 levels of abstraction and using your own structures instead of the
-stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
-incompatible styles, just means more reasons not to add a third (or well third
-one gets to do the cleanup refactor).
-
-18. There's a pile of sink handling code, both for DP and HDMI where I didn't
-immediately recognize the standard. I think long term it'd be best for the drm
-subsystem if we try to move as much of that into helpers/core as possible, and
-share it with drivers. But that's a very long term goal, and by far not just an
-issue with DC - other drivers, especially around DP sink handling, are equally
-guilty.
-
-19. DONE - The DC logger is still a rather sore thing, but I know that the
-DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
-something that integrates better with DRM and linux debug printing, while not
-being useless with filtering output. dynamic debug printing might be an option.
-
-20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
-retimer that we need to program to pass PHY compliance. Currently that's
-bypassing the i2c device and goes directly to HW. This should be changed.
-
-21. Remove vector.c from dc/basics. It's used in DDC code which can probably
-be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1a9bbb04bd5e..f1d67c6f4b98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -67,6 +67,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -147,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -270,7 +274,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- u32 v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
struct amdgpu_crtc *acrtc = NULL;
struct dc *dc = adev->dm.dc;
@@ -844,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
*/
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
- struct dmub_notification notify;
+ struct dmub_notification notify = {0};
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
@@ -1218,6 +1222,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -1225,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
break;
}
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ break;
+ default:
+ break;
+ }
+
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@@ -1721,8 +1735,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- init_data.flags.disable_ips_in_vpb = 1;
+ init_data.flags.disable_ips_in_vpb = 0;
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -1765,6 +1781,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -1929,17 +1948,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = NULL;
}
- if (adev->dm.dc)
+ if (adev->dm.dc) {
dc_deinit_callbacks(adev->dm.dc);
-
- if (adev->dm.dc)
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- kfree(adev->dm.dmub_notify);
- adev->dm.dmub_notify = NULL;
- destroy_workqueue(adev->dm.delayed_hpd_wq);
- adev->dm.delayed_hpd_wq = NULL;
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
}
if (adev->dm.dmub_bo)
@@ -2041,6 +2058,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
return 0;
default:
break;
@@ -2112,6 +2130,17 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
const struct dmcub_firmware_header_v1_0 *hdr;
enum dmub_asic dmub_asic;
enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
int r;
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
@@ -2150,6 +2179,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN321;
break;
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
default:
@@ -2209,7 +2239,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
- region_params.is_mailbox_in_inbox = false;
+ region_params.window_memory_type = window_memory_type;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -2237,6 +2267,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
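
[Note: the `is_mailbox_in_inbox` bool is dropped in favor of a per-window placement table: the same `window_memory_type` array now feeds both dmub_srv_calc_region_info() and the framebuffer memory params, so each DMUB window declares its backing memory type exactly once. A minimal sketch of the lookup this table enables — the helper is hypothetical and not part of the patch; only DMUB_WINDOW_MEMORY_TYPE_FB appears in this series:

	/* Hypothetical helper: resolve a window's placement by indexing the
	 * table that dm_dmub_sw_init() hands to region and memory setup. */
	static enum dmub_window_memory_type
	dmub_window_placement(const enum dmub_window_memory_type *table, int window)
	{
		return table[window]; /* e.g. table[DMUB_WINDOW_4_MAILBOX] */
	}
]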
@@ -2609,6 +2640,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
memset(del_streams, 0, sizeof(del_streams));
@@ -2635,7 +2667,9 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_streams(dc, context->streams, context->stream_count);
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
fail:
dc_state_release(context);
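
[Note: this is the API change running through the whole patch — dc_commit_streams() now takes a single `struct dc_commit_streams_params` instead of separate stream-array and count arguments (see also the dm_resume() and amdgpu_dm_commit_streams() hunks below). A sketch of the converted call pattern, assuming only the two fields this diff populates:

	/* Old: res = dc_commit_streams(dc, context->streams, context->stream_count);
	 * New: bundle the arguments in one struct so future parameters can be
	 * added without touching every caller. */
	struct dc_commit_streams_params params = {
		.streams      = context->streams,
		.stream_count = context->stream_count,
	};
	res = dc_commit_streams(dc, &params);
]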
@@ -2857,6 +2891,7 @@ static int dm_resume(void *handle)
struct dc_state *dc_state;
int i, r, j, ret;
bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
if (dm->dc->caps.ips_support) {
dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
@@ -2906,7 +2941,9 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2923,7 +2960,7 @@ static int dm_resume(void *handle)
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
- dm_state->context = dc_state_create(dm->dc);
+ dm_state->context = dc_state_create(dm->dc, NULL);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
/* Before powering on DC we need to re-initialize DMUB. */
@@ -3009,6 +3046,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -3027,6 +3065,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -3079,6 +3121,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version dm_ip_block = {
@@ -4395,6 +4439,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4477,6 +4522,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4498,6 +4544,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
psr_feature_enabled = true;
break;
default:
@@ -4506,6 +4553,27 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+/*
+ * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
+ * case IP_VERSION(3, 1, 4):
+ * case IP_VERSION(3, 1, 5):
+ * case IP_VERSION(3, 1, 6):
+ * case IP_VERSION(3, 2, 0):
+ * case IP_VERSION(3, 2, 1):
+ * case IP_VERSION(3, 5, 0):
+ * case IP_VERSION(3, 5, 1):
+ * replay_feature_enabled = true;
+ * break;
+ */
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4578,6 +4646,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -4645,6 +4718,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4778,6 +4852,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 0):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -4901,6 +4978,7 @@ static int dm_early_init(void *handle)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
@@ -5201,6 +5279,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
* @new_plane_state: New state of @plane
* @crtc_state: New state of CRTC connected to the @plane
* @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
* @dirty_regions_changed: dirty regions changed
*
* For PSR SU, DC informs the DMUB uController of dirty rectangle regions
@@ -5650,8 +5732,8 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
if (!adjust_colour_depth_from_display_info(timing_out, info) &&
drm_mode_is_420_also(info, mode_in) &&
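
[Note: this is the first of many hunks converting DC transfer functions and LUTs from separately allocated objects into members embedded in their parent structs (stream->out_transfer_func here; the plane-state members follow in the surface-update code and in amdgpu_dm_color.c). A sketch of the access-pattern change, using only names visible in the diff:

	/* Before (pointer member):
	 *     stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	 * After (embedded member): '.' replaces '->', and call sites that
	 * still traffic in struct dc_transfer_func * take the address. */
	stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
	bundle->stream_update.out_transfer_func = &acrtc_state->stream->out_transfer_func;
]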
@@ -5871,6 +5953,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6195,7 +6280,8 @@ create_stream_for_sink(struct drm_connector *connector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else if (aconnector) {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6254,27 +6340,22 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- stream->signal == SIGNAL_TYPE_EDP) {
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
- if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
- if (stream->link->psr_settings.psr_feature_enabled)
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -6355,9 +6436,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
- dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
- ret = 0;
}
return ret;
@@ -6400,19 +6478,87 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
- *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
- dm_state->abm_level : 0;
- ret = 0;
}
return ret;
}
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (between 0-4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -6474,9 +6620,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->vcpi_slots = 0;
state->pbn = 0;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- state->abm_level = amdgpu_dm_abm_level ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -6514,6 +6663,14 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -6665,7 +6822,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
if (!dc_plane_state)
goto cleanup;
- dc_state = dc_state_create(dc);
+ dc_state = dc_state_create(dc, NULL);
if (!dc_state)
goto cleanup;
@@ -7054,7 +7211,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
int i, j, ret;
- int vcpi, pbn_div, pbn, slot_num = 0;
+ int vcpi, pbn_div, pbn = 0, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -7430,7 +7587,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7547,12 +7704,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
- if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.abm_level_property, 0);
- }
-
if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
/* Content Type is currently only implemented for HDMI. */
drm_connector_attach_content_type_property(&aconnector->base);
@@ -8273,13 +8424,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
- bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
- bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
- bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
- bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
}
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
@@ -8492,7 +8643,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.output_csc_transform =
&acrtc_state->stream->csc_color_matrix;
bundle->stream_update.out_transfer_func =
- acrtc_state->stream->out_transfer_func;
+ &acrtc_state->stream->out_transfer_func;
bundle->stream_update.lut3d_func =
(struct dc_3dlut *) acrtc_state->stream->lut3d_func;
bundle->stream_update.func_shaper =
@@ -8546,10 +8697,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
- amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
/* Decrement skip count when PSR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8576,6 +8739,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
!acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
(timestamp_ns -
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
500000000)
@@ -8630,10 +8794,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
-notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8713,6 +8877,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -8838,17 +9003,18 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
} /* for_each_crtc_in_state() */
- /* if there mode set or reset, disable eDP PSR */
+ /* if there mode set or reset, disable eDP PSR, Replay */
if (mode_set_reset_required) {
if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
+ amdgpu_dm_replay_disable_all(dm);
amdgpu_dm_psr_disable_all(dm);
}
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -9715,7 +9881,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -9755,7 +9922,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
}
/* Now check if we should set freesync video mode */
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
is_timing_unchanged_for_freesync(new_crtc_state,
@@ -9768,7 +9935,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
@@ -10451,7 +10618,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
trace_amdgpu_dm_atomic_check_begin(state);
@@ -11138,18 +11305,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
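Aside (not part of the patch): a minimal user-space sketch of the EDID gate added above. FreeSync is reported immediately when the sink is a continuous-frequency display per its EDID 1.4 feature byte, and the detailed-timing scan is only a fallback. The struct and helper names here are illustrative; only the feature-bit position follows include/drm/drm_edid.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRM_EDID_FEATURE_CONTINUOUS_FREQ (1 << 0)	/* EDID 1.4, feature byte bit 0 */

struct edid_stub {	/* illustrative subset of struct edid */
	uint8_t version;
	uint8_t revision;
	uint8_t features;
};

/* Mirrors the new logic: continuous-frequency panels are freesync
 * capable outright; otherwise fall back to detailed timing parsing
 * for EDID newer than 1.1. */
static bool needs_detailed_timing_check(const struct edid_stub *e,
					bool *freesync_capable)
{
	if (e->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
		*freesync_capable = true;
		return false;
	}
	return e->version > 1 || (e->version == 1 && e->revision > 1);
}

int main(void)
{
	struct edid_stub e = { .version = 1, .revision = 4, .features = 0x01 };
	bool capable = false;

	printf("detailed check needed: %d, freesync: %d\n",
	       needs_detailed_timing_check(&e, &capable), capable);
	return 0;
}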
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 9c1871b866cc..09519b7abf67 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -693,6 +693,7 @@ struct amdgpu_dm_connector {
struct drm_display_mode freesync_vid_base;
int psr_skip_count;
+ bool disallow_edp_enter_psr;
/* Record progress status of mst */
uint8_t mst_status;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index c87b64e464ed..ebabfe3a512f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -571,7 +571,7 @@ static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = stream->out_transfer_func;
+ struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -954,8 +954,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,7 +963,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
regamma_size, has_rom);
if (r)
return r;
@@ -1034,7 +1034,7 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
&degamma_size);
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
/*
* This case isn't fully correct, but also fairly
@@ -1061,12 +1061,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.tf = tf;
else
- dc_plane_state->in_transfer_func->tf =
+ dc_plane_state->in_transfer_func.tf =
TRANSFER_FUNCTION_LINEAR;
- r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (r)
return r;
@@ -1075,12 +1075,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* For legacy gamma support we need the regamma input
* in linear space. Assume that the input is sRGB.
*/
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func.tf = tf;
if (tf != TRANSFER_FUNCTION_SRGB &&
!mod_color_calculate_degamma_params(caps,
- dc_plane_state->in_transfer_func,
+ &dc_plane_state->in_transfer_func,
NULL, false))
return -ENOMEM;
}
@@ -1114,24 +1114,24 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
return -EINVAL;
- dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf);
+ dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf);
if (has_degamma_lut) {
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_DISTRIBUTED_POINTS;
- ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func,
+ ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (ret)
return ret;
} else {
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_PREDEFINED;
if (!mod_color_calculate_degamma_params(color_caps,
- dc_plane_state->in_transfer_func, NULL, false))
+ &dc_plane_state->in_transfer_func, NULL, false))
return -ENOMEM;
}
return 0;
@@ -1156,11 +1156,11 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
lut3d_size = lut3d != NULL ? lut3d_size : 0;
- amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func);
+ amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
amdgpu_tf_to_dc_tf(shaper_tf),
shaper_size,
- dc_plane_state->in_shaper_func);
+ &dc_plane_state->in_shaper_func);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d shaper LUT failed.\n",
@@ -1175,7 +1175,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
amdgpu_tf_to_dc_tf(blend_tf),
- blend_size, dc_plane_state->blend_tf);
+ blend_size, &dc_plane_state->blend_tf);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d gamma lut failed.\n",
@@ -1221,8 +1221,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
color_caps = &dc_plane_state->ctx->dc->caps.color;
/* Initially, we can just bypass the DGM block. */
- dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR;
/* After, we start to update values according to color props */
has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 6e715ef3a556..e23a0a276e33 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -95,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features.
+ *
+ * @vblank_work: pointer to a struct vblank_control_work object.
+ * @vblank_enabled: indicates whether the DRM vblank counter is currently
+ * enabled (true) or disabled (false).
+ * @allow_sr_entry: represents whether entry into the self-refresh mode is
+ * allowed (true) or not allowed (false).
+ *
+ * The DRM vblank counter enable/disable action is used as the trigger to enable
+ * or disable various panel self-refresh features:
+ *
+ * Panel Replay and PSR SU
+ * - Enable when:
+ * - vblank counter is disabled
+ * - entry is allowed (usermode demonstrates an adequate number of fast
+ * commits)
+ * - CRC capture window isn't active
+ * - Keep enabled even when vblank counter gets enabled
+ *
+ * PSR1
+ * - Enable condition same as above
+ * - Disable when vblank counter is enabled
+ */
+static void amdgpu_dm_crtc_set_panel_sr_feature(
+ struct vblank_control_work *vblank_work,
+ bool vblank_enabled, bool allow_sr_entry)
+{
+ struct dc_link *link = vblank_work->stream->link;
+ bool is_sr_active = (link->replay_settings.replay_allow_active ||
+ link->psr_settings.psr_allow_active);
+ bool is_crc_window_active = false;
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ is_crc_window_active =
+ amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+#endif
+
+ if (link->replay_settings.replay_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ amdgpu_dm_replay_enable(vblank_work->stream, true);
+ } else if (vblank_enabled) {
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (link->psr_settings.psr_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+}
+
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
{
struct vblank_control_work *vblank_work =
@@ -123,18 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
- if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
- vblank_work->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
- } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
- !vblank_work->stream->link->psr_settings.psr_allow_active &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
-#endif
- vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
- amdgpu_dm_psr_enable(vblank_work->stream);
- }
+ amdgpu_dm_crtc_set_panel_sr_feature(
+ vblank_work, vblank_work->enable,
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
+ vblank_work->stream->link->replay_settings.replay_feature_enabled);
}
mutex_unlock(&dm->dc_lock);
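Aside (not part of the patch): the decision tree documented in amdgpu_dm_crtc_set_panel_sr_feature() condenses into a pure function. A hedged sketch with the same inputs; the enum and function names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

enum sr_action { SR_NONE, SR_ENABLE_REPLAY, SR_ENABLE_PSR, SR_DISABLE_PSR1 };

/* Condensed form of the priority logic above: Replay wins when its
 * feature bit is set; PSR1 (but not PSR-SU) is torn down when the
 * vblank counter comes back; PSR is entered only while vblank is off. */
static enum sr_action pick_sr_action(bool replay_feature, bool psr_feature,
				     bool psr_is_su, bool vblank_enabled,
				     bool allow_entry, bool sr_active,
				     bool crc_window_active,
				     bool disallow_edp_psr)
{
	bool can_enter = allow_entry && !sr_active && !crc_window_active;

	if (replay_feature && can_enter)
		return SR_ENABLE_REPLAY;
	if (vblank_enabled)
		return (!psr_is_su && sr_active) ? SR_DISABLE_PSR1 : SR_NONE;
	if (psr_feature && can_enter && !disallow_edp_psr)
		return SR_ENABLE_PSR;
	return SR_NONE;
}

int main(void)
{
	/* vblank off, entry allowed, nothing active -> enable PSR */
	printf("%d\n", pick_sr_action(false, true, true, false,
				      true, false, false, false));
	return 0;
}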
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 68a846323912..4d7a5d470b1e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
size_t size, loff_t *pos)
{
int r;
- uint8_t data[36];
+ uint8_t data[36] = {0};
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dm_crtc_state *acrtc_state;
uint32_t write_size = 36;
@@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
const uint32_t rd_buf_size = 10;
struct pipe_ctx *pipe_ctx;
ssize_t result = 0;
- int i, r, str_len = 30;
+ int i, r, str_len = 10;
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
@@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2936,7 +2960,7 @@ static int psr_read_residency(void *data, u64 *val)
{
struct amdgpu_dm_connector *connector = data;
struct dc_link *link = connector->dc_link;
- u32 residency;
+ u32 residency = 0;
link->dc->link_srv->edp_get_psr_residency(link, &residency);
@@ -2971,6 +2995,53 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
return 0;
}
+/* check whether the kernel disallows eDP from entering the psr state
+ * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter psr; 1: disallow
+ */
+static int disallow_edp_enter_psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ *val = (u64) aconnector->disallow_edp_enter_psr;
+ return 0;
+}
+
+/* set whether the kernel disallows eDP from entering the psr state
+ * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter psr; 1: disallow
+ *
+ * usage: a test app reads crc from a PSR eDP rx.
+ *
+ * during kernel boot up, the kernel writes dpcd 0x170 = 5.
+ * this notifies the eDP rx that psr is enabled and lets the rx check crc.
+ * the rx fw will start checking crc for the rx internal logic.
+ * the crc read count within dpcd 0x246 is not updated and its
+ * value is 0. when the eDP tx driver wants to read the rx crc
+ * from dpcd 0x246, 0x270, the zero read count makes the tx driver
+ * time out.
+ *
+ * to avoid this, this debugfs entry lets the test app disable
+ * rx crc checking for the rx internal logic. the test app can then
+ * read a non-zero crc read count.
+ *
+ * the expected app sequence is as below:
+ * 1. disable the eDP PHY and notify the eDP rx with dpcd 0x600 = 2.
+ * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 3. enable the eDP PHY and notify the eDP rx with dpcd 0x600 = 1 but
+ * without dpcd 0x170 = 5.
+ * 4. read crc from rx dpcd 0x270, 0x246, etc.
+ * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr.
+ * this brings eDP back to normal with psr setup dpcd 0x170 = 5.
+ */
+static int disallow_edp_enter_psr_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ aconnector->disallow_edp_enter_psr = val ? true : false;
+ return 0;
+}
+
static int dmub_trace_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = data;
@@ -3092,6 +3163,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops,
allow_edp_hotplug_detection_get,
allow_edp_hotplug_detection_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops,
+ disallow_edp_enter_psr_get,
+ disallow_edp_enter_psr_set, "%llu\n");
+
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -3265,6 +3340,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
&edp_ilr_debugfs_fops);
debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector,
&allow_edp_hotplug_detection_fops);
+ debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector,
+ &disallow_edp_enter_psr_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
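Aside (not part of the patch): a user-space sketch of the CRC-test sequence the new attribute enables, assuming the connector directory is eDP-1 and debugfs is mounted at /sys/kernel/debug. The DPCD steps from the comment above would be driven by the test harness itself.

#include <stdio.h>

/* Hypothetical path; substitute the actual eDP connector directory. */
#define KNOB "/sys/kernel/debug/dri/0/eDP-1/disallow_edp_enter_psr"

static int write_knob(int val)
{
	FILE *f = fopen(KNOB, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	if (write_knob(1))	/* step 2: keep eDP out of PSR */
		return 1;
	/* ... re-enable the PHY and read rx CRC via DPCD 0x270/0x246 ... */
	return write_knob(0);	/* step 5: restore normal PSR setup */
}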
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b54d646a7c73..e339c7a8d541 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -741,6 +741,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 941e96f100f4..35733d5c5193 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -791,25 +791,12 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
-{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
-
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
-
- return fec_overhead_multiplier_x1000;
-}
-
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static int kbps_to_peak_pbn(int kbps)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+ peak_kbps = div_u64(peak_kbps, 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
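Aside (not part of the patch): a worked example of the simplified kbps_to_peak_pbn() above. The per-encoding FEC multiplier is gone; only the fixed 0.6% downspread margin remains. PBN is expressed in units of 54/64 MBps, hence the 64/(54*8*1000) scaling; the input value below is illustrative.

#include <stdint.h>
#include <stdio.h>

static int kbps_to_peak_pbn(int kbps)
{
	uint64_t peak_kbps = (uint64_t)kbps * 1006 / 1000; /* +0.6% downspread */

	/* 1 PBN = 54/64 MBps; convert kbps and scale, rounding up. */
	return (int)((peak_kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	/* e.g. an 8.1 Gbps stream: 8148600 * 64 / 432000 -> 1208 PBN */
	printf("%d\n", kbps_to_peak_pbn(8100000));
	return 0;
}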
@@ -910,12 +897,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1011,7 +997,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1041,7 +1026,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1054,7 +1039,8 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(
+ params[next_index].bw_range.max_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1083,7 +1069,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -1148,7 +1133,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1167,7 +1152,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1175,7 +1160,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1219,8 +1204,10 @@ static bool is_dsc_need_re_compute(
if (dc_link->type != dc_connection_mst_branch)
return false;
- if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
- dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
+ /* add a check for older MST DSC with no virtual DPCDs */
+ if (needs_dsc_aux_workaround(dc_link) &&
+ (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
return false;
for (i = 0; i < MAX_PIPES; i++)
@@ -1240,7 +1227,15 @@ static bool is_dsc_need_re_compute(
continue;
aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
- if (!aconnector)
+ if (!aconnector || !aconnector->dsc_aux)
+ continue;
+
+ /*
+ * check if cached virtual MST DSC caps are available and DSC is supported
+ * as per specifications in their Virtual DPCD registers.
+ */
+ if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
@@ -1601,7 +1596,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int bpp, pbn, branch_max_throughput_mps = 0;
+ int pbn, branch_max_throughput_mps = 0;
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1651,11 +1646,34 @@ enum dc_status dm_dp_mst_is_port_support_mode(
}
}
} else {
- /* check if mode could be supported within full_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
- if (pbn > aconnector->mst_output_port->full_pbn)
+ /* Check if the mode can be supported within the max slot
+ * number of the current mst link and the full_pbn of the mst links.
+ */
+ int pbn_div, slot_num, max_slot_num;
+ enum dc_link_encoding_format link_encoding;
+ uint32_t stream_kbps =
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->link));
+
+ pbn = kbps_to_peak_pbn(stream_kbps);
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ link_encoding = dc_link_get_highest_encoding_format(stream->link);
+ if (link_encoding == DC_LINK_ENCODING_DP_8b_10b)
+ max_slot_num = 63;
+ else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ max_slot_num = 64;
+ else {
+ DRM_DEBUG_DRIVER("Invalid link encoding format\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ if (slot_num > max_slot_num ||
+ pbn > aconnector->mst_output_port->full_pbn) {
+ DRM_DEBUG_DRIVER("Mode can not be supported within mst links!");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
/* check if mst dsc output bandwidth exceeds branch_overall_throughput_0_mps */
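Aside (not part of the patch): the 63 vs. 64 limit above comes from DP MST framing; with 8b/10b one of the 64 timeslots carries the MTP header, while 128b/132b keeps all 64 usable (hedged reading of the spec). The sketch restates the admission check with invented numbers.

#include <stdio.h>

enum link_encoding { DP_8B_10B, DP_128B_132B };

static int max_mst_slots(enum link_encoding enc)
{
	/* 8b/10b MST spends one of 64 timeslots on the MTP header;
	 * 128b/132b keeps all 64 usable. */
	return enc == DP_8B_10B ? 63 : 64;
}

int main(void)
{
	int pbn = 1208, pbn_div = 60;			/* illustrative values */
	int slots = (pbn + pbn_div - 1) / pbn_div;	/* DIV_ROUND_UP */
	int full_pbn = 1560;				/* hypothetical branch limit */

	if (slots > max_mst_slots(DP_8B_10B) || pbn > full_pbn)
		printf("mode rejected\n");
	else
		printf("mode fits in %d slots\n", slots);
	return 0;
}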
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 37c820ab0fdb..fa84d34b7373 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,9 +46,6 @@
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
-#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
-#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
-
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 1f08c6564c3b..bfa090432ce2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
* amdgpu_dm_psr_enable() - enable psr f/w
* @stream: stream state
*
- * Return: true if success
*/
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
power_opt |= psr_power_opt_z10_static_screen;
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
}
/*
@@ -210,7 +212,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
}
/*
- * amdgpu_dm_psr_disable() - disable psr f/w
+ * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
* if psr is enabled on any stream
*
* Return: true if success
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84b..1fdfd183c0d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 5ce542b1f860..738a58eebba7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto
if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT)
return false;
+ // Sink shall populate line deviation information
+ if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 ||
+ dpcd_caps->pr_info.max_deviation_line == 0)
+ return false;
+
return true;
}
/*
- * amdgpu_dm_setup_replay() - setup replay configuration
+ * amdgpu_dm_set_replay_caps() - setup Replay capabilities
* @link: link
* @aconnector: aconnector
*
*/
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
{
- struct replay_config pr_config;
+ struct replay_config pr_config = { 0 };
union replay_debug_flags *debug_flags = NULL;
- // For eDP, if Replay is supported, return true to skip checks
+ // If Replay support is already set, return true to skip the checks
if (link->replay_settings.config.replay_supported)
return true;
@@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
if (!link_supports_replay(link, aconnector))
return false;
- // Mark Replay is supported in link and update related attributes
+ // Mark Replay as supported in pr_config
pr_config.replay_supported = true;
- pr_config.replay_power_opt_supported = 0;
- pr_config.replay_enable_option |= pr_enable_option_static_screen;
- pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
-
- if (!pr_config.replay_timing_sync_supported)
- pr_config.replay_enable_option &= ~pr_enable_option_general_ui;
debug_flags = (union replay_debug_flags *)&pr_config.debug_flags;
debug_flags->u32All = 0;
debug_flags->bitfields.visual_confirm =
link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY;
- link->replay_settings.replay_feature_enabled = true;
-
init_replay_config(link, &pr_config);
return true;
}
+/*
+ * amdgpu_dm_link_setup_replay() - configure replay link
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+ struct replay_config *pr_config;
+
+ if (link == NULL || aconnector == NULL)
+ return false;
+
+ pr_config = &link->replay_settings.config;
+
+ if (!pr_config->replay_supported)
+ return false;
+
+ pr_config->replay_power_opt_supported = 0x11;
+ pr_config->replay_smu_opt_supported = false;
+ pr_config->replay_enable_option |= pr_enable_option_static_screen;
+ pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
+ pr_config->replay_timing_sync_supported = false;
+
+ if (!pr_config->replay_timing_sync_supported)
+ pr_config->replay_enable_option &= ~pr_enable_option_general_ui;
+
+ link->replay_settings.replay_feature_enabled = true;
+
+ return true;
+}
/*
* amdgpu_dm_replay_enable() - enable replay f/w
@@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
*/
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
{
- uint64_t state;
- unsigned int retry_count;
bool replay_active = true;
- const unsigned int max_retry = 1000;
- bool force_static = true;
struct dc_link *link = NULL;
-
if (stream == NULL)
return false;
link = stream->link;
- if (link == NULL)
- return false;
-
- link->dc->link_srv->edp_setup_replay(link, stream);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL);
-
- if (wait == true) {
-
- for (retry_count = 0; retry_count <= max_retry; retry_count++) {
- dc_link_get_replay_state(link, &state);
- if (replay_active) {
- if (state != REPLAY_STATE_0 &&
- (!force_static || state == REPLAY_STATE_3))
- break;
- } else {
- if (state == REPLAY_STATE_0)
- break;
- }
- udelay(500);
- }
-
- /* assert if max retry hit */
- if (retry_count >= max_retry)
- ASSERT(0);
- } else {
- /* To-do: Add trace log */
+ if (link) {
+ link->dc->link_srv->edp_setup_replay(link, stream);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ DRM_DEBUG_DRIVER("Enabling replay...\n");
+ link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
+ return true;
}
- return true;
+ return false;
}
/*
@@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
*/
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream)
{
+ bool replay_active = false;
+ struct dc_link *link = NULL;
- if (stream->link) {
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+
+ if (link) {
DRM_DEBUG_DRIVER("Disabling replay...\n");
- stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL);
+ link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL);
return true;
}
return false;
}
+
+/*
+ * amdgpu_dm_replay_disable_all() - disable replay f/w
+ * if replay is enabled on any stream
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm)
+{
+ DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n");
+ return dc_set_replay_allow_active(dm->dc, false);
+}
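Aside (not part of the patch): with the split above, Replay setup becomes two-phase: amdgpu_dm_set_replay_caps() records sink capability at detection time, and amdgpu_dm_link_setup_replay() arms the link options just before the first enable. A hedged call-order toy model; all names and state here are stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct toy_link { bool caps_set, feature_enabled; };

static bool set_replay_caps(struct toy_link *l)   /* at sink detection */
{
	if (l->caps_set)
		return true;	/* already probed, skip the DPCD checks */
	l->caps_set = true;	/* would parse pr_info / adaptive-sync caps here */
	return true;
}

static bool link_setup_replay(struct toy_link *l) /* before first enable */
{
	if (!l->caps_set)
		return false;
	l->feature_enabled = true; /* would program enable options here */
	return true;
}

int main(void)
{
	struct toy_link l = { 0 };

	printf("%d %d\n", set_replay_caps(&l), link_setup_replay(&l));
	return 0;
}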
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 01cba3cd6246..f0d30eb47312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -40,7 +40,9 @@ enum replay_enable_option {
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm);
#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index d9e33c6bccd9..0005f5f8f34f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+{
+}
+
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+{
+}
+
/**** power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630..08c494a7a21b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
- dev->mode_config.max_height);
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 7991ae468f75..4e9fb1742877 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 1090d235086a..bd1f60ecaba4 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -101,6 +101,40 @@ void convert_float_matrix(
}
}
+static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ struct fixed31_32 result;
+ uint16_t sign_mask = 1 << (fractional_bits + integer_bits);
+ uint16_t value_mask = sign_mask - 1;
+
+ result.value = (long long)(arg & value_mask) <<
+ (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+
+ if (arg & sign_mask)
+ result = dc_fixpt_neg(result);
+
+ return result;
+}
+
+/**
+ * convert_hw_matrix - converts HW values into fixed31_32 matrix.
+ * @matrix: fixed point 31.32 matrix
+ * @reg: array of register values
+ * @buffer_size: size of the array of register values
+ *
+ * Converts HW register spec defined format S2D13 into a fixed-point 31.32
+ * matrix.
+ */
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size)
+{
+ for (int i = 0; i < buffer_size; ++i)
+ matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13);
+}
+
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
uint32_t remainder;
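Aside (not part of the patch): a quick numeric check of the S2D13 decode performed by int_frac_to_fixed_point() above: 15 magnitude bits scaled by 2^13, with bit 15 acting as a sign flag. The float version below is only for illustration; the kernel keeps everything in 31.32 fixed point.

#include <stdint.h>
#include <stdio.h>

/* S2D13: sign bit, 2 integer bits, 13 fractional bits. */
static double s2d13_to_double(uint16_t reg)
{
	double v = (double)(reg & 0x7fff) / 8192.0; /* 2^13 */

	return (reg & 0x8000) ? -v : v;
}

int main(void)
{
	printf("%f\n", s2d13_to_double(0x2000)); /*  1.000000 */
	printf("%f\n", s2d13_to_double(0xa000)); /* -1.000000 */
	return 0;
}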
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index 81da4e6f7a1a..a433cef78496 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -41,6 +41,10 @@ void convert_float_matrix(
void reduce_fraction(uint32_t num, uint32_t den,
uint32_t *out_num, uint32_t *out_den);
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 39530b2ea495..b30c2cdc1a61 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "resource.h"
#include "dm_services.h"
#include "dce_calcs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 6450853fea94..bc16db69a663 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -44,8 +44,6 @@
#include "bios_parser_common.h"
-#include "dc.h"
-
#define THREE_PERCENT_OF_10000 300
#define LAST_RECORD_TYPE 0xff
@@ -1731,6 +1729,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
return 0;
}
+
/**
* get_ss_entry_number_from_internal_ss_info_tbl_V3_1
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 05f392501c0a..9fe0020bcb9c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1594,8 +1594,6 @@ static bool bios_parser_is_device_id_supported(
return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
break;
}
-
- return false;
}
static uint32_t bios_parser_get_ss_entry_number(
@@ -2948,6 +2946,7 @@ static enum bp_result construct_integrated_info(
result = get_integrated_info_v2_1(bp, info);
break;
case 2:
+ case 3:
result = get_integrated_info_v2_2(bp, info);
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 818a529cacc3..2bcae0643e61 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -37,7 +37,7 @@
#define EXEC_BIOS_CMD_TABLE(command, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
@@ -399,7 +399,7 @@ static enum bp_result transmitter_control_v1_6(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
frev, crev) == false)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 293a919d605d..cc000833d300 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -49,7 +49,7 @@
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
@@ -225,7 +225,7 @@ static enum bp_result transmitter_control_fallback(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9d347960e2b0..117fc6d4c1de 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -81,6 +81,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 28a2a837d2f0..a2b4ff2cff16 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dal_asic_id.h"
#include "dc_types.h"
#include "dccg.h"
@@ -274,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
- if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ if (ctx->dce_version == DCN_VERSION_2_01) {
dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@@ -331,16 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
case AMDGPU_FAMILY_GC_11_0_0: {
- struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
- if (clk_mgr == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
- dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base;
- break;
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+ dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
}
case AMDGPU_FAMILY_GC_11_0_1: {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..2a5dd3a296b2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz;
+ int dp_ref_clk_khz = 600000;
int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 60761ff3cbf1..369421e46c52 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -23,9 +23,6 @@
*
*/
-#include <linux/slab.h>
-
-#include "reg_helper.h"
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
deleted file mode 100644
index 61dd12198a3c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "reg_helper.h"
-#include "clk_mgr_internal.h"
-#include "rv1_clk_mgr_clk.h"
-
-#include "ip/Discovery/hwid.h"
-#include "ip/Discovery/v1/ip_offset_1.h"
-#include "ip/CLK/clk_10_0_default.h"
-#include "ip/CLK/clk_10_0_offset.h"
-#include "ip/CLK/clk_10_0_reg.h"
-#include "ip/CLK/clk_10_0_sh_mask.h"
-
-#include "dce100/dce_clk_mgr.h"
-
-#define CLK_BASE_INNER(inst) \
- CLK_BASE__INST ## inst ## _SEG0
-
-
-#define CLK_REG(reg_name, block, inst)\
- CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## _ ## inst ## _ ## reg_name
-
-#define REG(reg_name) \
- CLK_REG(reg_name, CLK0, 0)
-
-
-/* Only used by testing framework*/
-void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-
- regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk
-
- bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007;
- if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4)
- bypass->dcfclk_bypass = 0;
-
-
- regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider
-
- regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow
-
- regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk
-
- bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007;
- if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4)
- bypass->dispclk_pypass = 0;
-
- regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk
-
- bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007;
- if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4)
- bypass->dprefclk_bypass = 0;
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 89b79dd39628..19897fa52e7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -26,7 +26,6 @@
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
-#include <linux/delay.h>
#include "rv1_clk_mgr_vbios_smu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 5ee87965a078..bb4f3bd7532e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 9c90090e7351..f77840dd051e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dispclk_khz == 0 ||
dc->debug.force_clock_mode & 0x1) {
+ /* this is from resume or boot up; if the forced_clock cfg option is
+ * used, we bypass programming dispclk and DPPCLK, but still need to
+ * set them for S3.
+ */
+
force_reset = true;
+ /* force_clock_mode 0x1: force reset the clock even if it is the
+ * same clock, as long as it is in the Passive level.
+ */
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
@@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
+ // if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
+ // always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
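Aside (not part of the patch): the ordering rules in the comments above avoid a transient over-clock of the DPP pipes. A hedged toy model in which the divider (dentist) and per-pipe DTO must never combine into a higher-than-requested clock; the names and "program" steps are illustrative.

#include <stdio.h>

static void update_clocks(int lowering, void (*dto_fn)(void),
			  void (*dentist_fn)(void))
{
	if (lowering) {
		dto_fn();	/* raise the DTO divide first ... */
		dentist_fn();	/* ... then drop the refclk */
	} else {
		dentist_fn();	/* raise the refclk first ... */
		dto_fn();	/* ... then relax the DTO */
	}
}

static void dto(void)     { printf("program DPP DTO\n"); }
static void dentist(void) { printf("program dentist divider\n"); }

int main(void)
{
	update_clocks(1, dto, dentist);
	return 0;
}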
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0c6a4ab72b1d..5ef0879f6ad9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
j = -1;
- ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceed maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
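Aside (not part of the patch): moving this bound from ASSERT() to static_assert() turns a runtime debug splat into a build failure. A standalone illustration with made-up constants:

#include <assert.h>	/* static_assert (C11) */

#define PP_SMU_NUM_FCLK_DPM_LEVELS 4	/* stand-in values */
#define MAX_NUM_DPM_LVL 8

/* Fails at compile time, with no runtime cost, if the table grows
 * past what the bw_params arrays can hold. */
static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
	      "number of reported FCLK DPM levels exceeds maximum");

int main(void) { return 0; }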
@@ -707,9 +708,7 @@ void rn_clk_mgr_construct(
int is_green_sardine = 0;
struct clk_log_info log_info = {0};
-#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
-#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 8c9d45e5b13b..23b390245b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -26,6 +26,10 @@
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "rn_clk_mgr_vbios_smu.h"
+
#include <linux/delay.h>
#include "renoir_ip_offset.h"
@@ -33,8 +37,6 @@
#include "mp/mp_12_0_0_offset.h"
#include "mp/mp_12_0_0_sh_mask.h"
-#include "rn_clk_mgr_vbios_smu.h"
-
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
@@ -120,7 +122,10 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
+ if (IS_SMU_TIMEOUT(result)) {
+ ASSERT(0);
+ dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
+ }
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
@@ -185,10 +190,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3271c8c7905d..8083a553c60e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
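
Both allocations in the construct path are now guarded; previously a NULL bw_params or watermark table would only fault later, far from the failed allocation. A standalone sketch of the guard pattern, with the kernel helpers replaced by libc stand-ins:

#include <stdlib.h>

struct mgr_sketch {
	void *bw_params;
	void *wm_range_table;
};

static void break_to_debugger(void) {} /* stand-in for BREAK_TO_DEBUGGER() */

static int construct_tables(struct mgr_sketch *m, size_t bw_sz, size_t wm_sz)
{
	m->bw_params = calloc(1, bw_sz); /* kzalloc() equivalent */
	if (!m->bw_params) {
		break_to_debugger();
		return -1;
	}

	m->wm_range_table = malloc(wm_sz); /* stands in for the GART alloc helper */
	if (!m->wm_range_table) {
		break_to_debugger();
		return -1;
	}

	return 0;
}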
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index bdbf18306698..3253115a153d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "dcn30_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
@@ -54,6 +53,7 @@
*/
static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- /* Log? */
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
@@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
if (IS_SMU_TIMEOUT(result)) {
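
The tracing added above derives the time spent polling from the retry budget that was consumed: the initial max_retries is captured before the loop, and the elapsed time is the per-poll delay times the retries used. A compilable sketch of that accounting, with register access and tracing stubbed:

#include <stdint.h>

static uint32_t read_response_reg(void) { return 1; } /* stub: non-zero = done */
static void udelay_sketch(unsigned us) { (void)us; }   /* stub for udelay() */
static void trace_smu_delay(uint32_t us) { (void)us; } /* stub for TRACE_SMU_DELAY() */

static uint32_t wait_for_response_sketch(unsigned delay_us, uint32_t max_retries)
{
	const uint32_t initial_max_retries = max_retries;
	uint32_t reg = 0;

	do {
		reg = read_response_reg();
		if (reg)
			break;
		udelay_sketch(delay_us);
	} while (max_retries--);

	/* retries consumed * per-poll delay approximates the wait in us */
	trace_smu_delay(delay_us * (initial_max_retries - max_retries));

	return reg;
}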
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index e4f96b6fd79d..b4fb17b7a096 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include "dcn301_smu.h"
+#include "dm_helpers.h"
#include "vangogh_ip_offset.h"
@@ -120,7 +121,10 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK);
+ if (IS_SMU_TIMEOUT(result)) {
+ ASSERT(0);
+ dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
+ }
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
@@ -180,10 +184,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index aa9fd1dc550a..191d8b969d19 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index ce1386e22576..12a7752758b8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 32279c5db724..f201628e4e98 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
@@ -202,10 +201,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 07baa10a8647..c4af406146b7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
- actual_dcfclk_set_mhz,
- actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index 047d19ea919c..78ca1e5c5e9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -37,34 +37,34 @@ typedef enum {
} WCK_RATIO_e;
typedef struct {
- uint32_t FClk;
- uint32_t MemClk;
- uint32_t Voltage;
- uint8_t WckRatio;
- uint8_t Spare[3];
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
} DfPstateTable314_t;
//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
- uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
- uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
- uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
- uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
- uint32_t VClocks[NUM_VCN_DPM_LEVELS];
- uint32_t DClocks[NUM_VCN_DPM_LEVELS];
- uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
- DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
- uint8_t NumDcfClkLevelsEnabled;
- uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
- uint8_t NumSocClkLevelsEnabled;
- uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
- uint8_t NumDfPstatesEnabled;
- uint8_t spare[3];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
- uint32_t MinGfxClk;
- uint32_t MaxGfxClk;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
} DpmClocks314_t;
struct dcn314_watermarks {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 644da4637320..5506cf9b3672 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
*/
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
if (safe_to_lower) {
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn315_get_active_display_cnt_wa(dc, context);
@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
} else {
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
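
The DTBCLK handling above follows the driver's two-pass update convention: reductions are applied only in the safe_to_lower pass, after the committed state no longer needs the clock, while increases are applied in the other pass, before the new state needs it. A standalone sketch of that ordering; smu_set_dtbclk() is a stand-in for dcn315_smu_set_dtbclk():

#include <stdbool.h>

struct clks_sketch { bool dtbclk_en; };

static void smu_set_dtbclk(bool en) { (void)en; } /* stub */

static void update_dtbclk(struct clks_sketch *cur, const struct clks_sketch *new,
			  bool safe_to_lower)
{
	if (safe_to_lower && cur->dtbclk_en && !new->dtbclk_en) {
		smu_set_dtbclk(false);          /* lower only when safe */
		cur->dtbclk_en = new->dtbclk_en;
	} else if (!safe_to_lower && !cur->dtbclk_en && new->dtbclk_en) {
		smu_set_dtbclk(true);           /* raise before the new state needs it */
		cur->dtbclk_en = new->dtbclk_en;
	}
}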
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 1042cf1a3ab0..2d14346b680e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dcn315_smu.h"
#include "mp/mp_13_0_5_offset.h"
+#include "logger_types.h"
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
@@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define REG_NBIO(reg_name) \
(NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8..20ca7afa9cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
@@ -480,7 +485,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
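
The workaround change above does two things: when lowering clocks, the pipes to blank are taken from the new context being transitioned to rather than the current state, and the CRTC-disable hook is called only if the timing generator actually provides it. Sketched standalone, with illustrative stand-ins for the pipe and tg structures:

#include <stdbool.h>

struct tg_funcs_sketch { void (*immediate_disable_crtc)(void); };
struct pipe_sketch { bool active; struct tg_funcs_sketch *tg; };

static void disable_otg_wa_sketch(struct pipe_sketch *new_ctx, struct pipe_sketch *cur_ctx,
				  int pipe_count, bool safe_to_lower, bool disable)
{
	for (int i = 0; i < pipe_count; i++) {
		/* pick the state the transition is heading toward when lowering */
		struct pipe_sketch *p = safe_to_lower ? &new_ctx[i] : &cur_ctx[i];

		if (!p->active)
			continue;
		if (disable && p->tg && p->tg->immediate_disable_crtc)
			p->tg->immediate_disable_crtc(); /* NULL-guarded hook */
	}
}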
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index 3ed19197a755..8b82092b91cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index aadd07bc68c5..ff5fdc7b1198 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -29,6 +29,7 @@
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
+#include "dcn32/dcn32_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -40,7 +41,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#include "dcn32/dcn32_clk_mgr.h"
#include "dml/dcn32/dcn32_fpu.h"
#define DCN_BASE__INST0_SEG1 0x000000C0
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in dpp clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
@@ -240,13 +250,15 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
- DC_FP_START();
/* WM range table */
dcn32_build_wm_range_table(clk_mgr);
- DC_FP_END();
}
static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
@@ -387,7 +399,15 @@ static void dcn32_update_clocks_update_dentist(
uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+ /*
+ * SMU uses discrete dispclk presets. We applied
+ * the same formula to increase our dispclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dispclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(temp_dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -426,7 +446,15 @@ static void dcn32_update_clocks_update_dentist(
/* do requested DISPCLK updates*/
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+ /*
+ * SMU uses discrete dispclk presets. We applied
+ * the same formula to increase our dispclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dispclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -493,6 +521,8 @@ static void dcn32_auto_dpm_test_log(
}
}
+ msleep(5);
+
mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
@@ -682,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
* since we calculate mode support based on softmax being the max UCLK
* frequency.
*/
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ if (dc->debug.disable_dc_mode_overwrite) {
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ } else
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
}
@@ -716,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- !dc->work_arounds.clock_update_disable_mask.uclk)
+ !dc->work_arounds.clock_update_disable_mask.uclk) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+ max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
+
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+ }
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
@@ -734,7 +773,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
if (clk_mgr->smu_present && !dpp_clock_lowered)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
update_dppclk = true;
}
@@ -765,7 +812,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
@@ -1153,11 +1208,19 @@ void dcn32_clk_mgr_construct(
clk_mgr->smu_present = false;
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
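
Several hunks above switch hard-min requests from khz_to_mhz_ceil() to khz_to_mhz_floor(): because the SMU exposes discrete presets, the floored MHz value names the preset the divider math actually landed on, while rounding up could request the next preset. A sketch of the two roundings, mirroring the helpers in spirit only:

static unsigned int khz_to_mhz_floor_sketch(unsigned int khz)
{
	return khz / 1000;
}

static unsigned int khz_to_mhz_ceil_sketch(unsigned int khz)
{
	return (khz + 999) / 1000;
}

/* For a computed 1799990 kHz dispclk: floor -> 1799 MHz (the preset in
 * use), ceil -> 1800 MHz (one preset higher than intended). */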
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index df244b175fdb..f2f60478b1a6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -49,6 +49,7 @@
*/
static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
if (param_out)
@@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
+ TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+
return reg;
}
@@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index a34c258c19dc..5c44ab0e8667 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -36,12 +36,10 @@
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_Result_OK 0x1
-void
-dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
-void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
-void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
+void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index e64890259235..6c9b4e6491a5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,14 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
#define REG(reg_name) \
(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -244,7 +252,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, false);
+ if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
+ dcn35_smu_set_dtbclk(clk_mgr, false);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -384,19 +393,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn35_smu_enable_pme_wa(clk_mgr);
}
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
- uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
bool dcn35_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
@@ -422,6 +418,45 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ init_clk_states(clk_mgr);
+
+ // adjust dp_dto reference clock if SSC is enabled; otherwise use dprefclk
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
@@ -509,6 +544,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -706,7 +763,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
@@ -833,35 +890,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
-static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
-
- if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
- dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- }
-
- if (!allow_idle) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- }
-
- dcn35_smu_write_ips_scratch(clk_mgr, val);
-}
-
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -881,16 +909,9 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
return ips_supported;
}
-static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-
- return dcn35_smu_read_ips_scratch(clk_mgr);
-}
-
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
- dcn35_init_clocks(clk_mgr);
+ init_clk_states(clk_mgr);
/* TODO: Implement the functions and remove the ifndef guard */
}
@@ -975,8 +996,6 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -1053,6 +1072,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
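
The SSC handling added above takes the DPREFCLK spread-spectrum percentage from a fixed LUT indexed by the bypass clock source, but only when the SPLL reports SSC enabled; an entry of 375 with divider 1000 encodes 0.375%. A standalone sketch with the register reads stubbed out:

#include <stdbool.h>
#include <stdint.h>

#define NUM_CLK_SRC 5 /* matches the LUT length above */

struct ss_info_sketch {
	uint32_t divider;                 /* 375 / 1000 encodes 0.375% */
	uint32_t percentage[NUM_CLK_SRC];
};

static const struct ss_info_sketch ss_lut = { 1000, {0, 0, 375, 375, 375} };

static bool spll_ssc_enabled(void) { return true; }   /* stub for the REG_GET */
static uint32_t read_clock_source(void) { return 2; } /* stub: bypass select */

static uint32_t dprefclk_ss_lookup(void)
{
	uint32_t src = read_clock_source(); /* 0 means DFS mode, no bypass */

	if (!spll_ssc_enabled() || src >= NUM_CLK_SRC)
		return 0;

	return ss_lut.percentage[src]; /* interpret against ss_lut.divider */
}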
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 6d4a1ffab5ed..1399b41dfd1c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
clk_mgr,
msg_id,
param);
- smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
+ if (!clk_mgr->smu_present)
+ return;
+
dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
@@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
@@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
@@ -478,18 +487,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
return retv;
}
-
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
-{
- REG_WRITE(MP1_SMN_C2PMSG_71, param);
- //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
-}
-
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
-{
- uint32_t retv;
-
- retv = REG_READ(MP1_SMN_C2PMSG_71);
- //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
- return retv;
-}
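
Each VBIOS-SMU entry point touched above gains an early-out when no SMU is present, so emulation and bring-up configurations never touch the message registers. The shape of the guard, sketched:

#include <stdbool.h>

struct smu_ctx_sketch { bool smu_present; };

static int send_disp_psr_exit(struct smu_ctx_sketch *ctx)
{
	/* bail out before any register access on SMU-less configurations */
	if (!ctx->smu_present)
		return 0;

	/* ... message the SMU here ... */
	return 1;
}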
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 2b8e6959a03d..06cd3cc6d36e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_35_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2c424e435962..236876d95185 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,6 +36,7 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -212,7 +213,8 @@ static bool create_links(
connectors_num,
num_virtual_links);
- for (i = 0; i < connectors_num; i++) {
+ // condition loop on link_count to allow skipping invalid indices
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -386,6 +388,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
*perf_trace = NULL;
}
+static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
+{
+ int i;
+
+ if (!dc || !stream || !adjust)
+ return false;
+
+ if (!dc->current_state)
+ return false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ if (dc->hwss.set_long_vtotal)
+ dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
* @dc: dc reference
@@ -414,10 +440,21 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->optimized_required || dc->wm_optimized_required)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
stream->adjust.v_total_min = adjust->v_total_min;
+ stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
+
+ if (dc->caps.max_v_total != 0 &&
+ (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ if (adjust->allow_otg_v_count_halt)
+ return set_long_vtotal(dc, stream, adjust);
+ else
+ return false;
+ }
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -454,6 +491,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -484,6 +523,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -603,6 +644,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -662,6 +705,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -686,6 +731,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -721,6 +768,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -745,6 +794,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -762,6 +813,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -788,6 +841,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -1035,8 +1090,7 @@ static bool dc_construct(struct dc *dc,
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
-
- dc->current_state = dc_state_create(dc);
+ dc->current_state = dc_state_create(dc, NULL);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
@@ -1252,7 +1306,7 @@ static void disable_vbios_mode_if_required(
if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
unsigned int enc_inst, tg_inst = 0;
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (enc_inst != ENGINE_ID_UNKNOWN) {
@@ -1282,6 +1336,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require to reprogram blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1691,7 +1793,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
if (dc_is_dp_signal(link->connector_signal)) {
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
uint32_t numOdmPipes = 1;
uint32_t id_src[4] = {0};
@@ -1733,6 +1835,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
@@ -1766,6 +1871,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1785,6 +1892,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -1969,6 +2078,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * The optimized dispclk depends on the ODM setup, so wait for any
+ * pending ODM update to complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -2015,8 +2129,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* dc_commit_streams - Commit current stream state
*
* @dc: DC object with the commit state to be configured in the hardware
- * @streams: Array with a list of stream state
- * @stream_count: Total of streams
+ * @params: Parameters for the commit, including the streams to be committed
*
* Function responsible for commit streams change to the hardware.
*
@@ -2024,9 +2137,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Return DC_OK if everything work as expected, otherwise, return a dc_status
* code.
*/
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count)
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
{
int i, j;
struct dc_state *context;
@@ -2035,16 +2146,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;
+ if (!params)
+ return DC_ERROR_UNEXPECTED;
+
if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
return res;
- if (!streams_changed(dc, streams, stream_count))
+ if (!streams_changed(dc, params->streams, params->stream_count) &&
+ dc->current_state->power_source == params->power_source)
return res;
- DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
+ dc_exit_ips_for_hw_access(dc);
- for (i = 0; i < stream_count; i++) {
- struct dc_stream_state *stream = streams[i];
+ DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
+
+ for (i = 0; i < params->stream_count; i++) {
+ struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
dc_stream_log(dc, stream);
@@ -2062,7 +2179,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
* scenario, it uses extra pipes than needed to reduce power consumption
* We need to switch off this feature to make room for new streams.
*/
- if (stream_count > dc->current_state->stream_count &&
+ if (params->stream_count > dc->current_state->stream_count &&
dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2078,7 +2195,9 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!context)
goto context_alloc_fail;
- res = dc_validate_with_context(dc, set, stream_count, context, false);
+ context->power_source = params->power_source;
+
+ res = dc_validate_with_context(dc, set, params->stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -2086,16 +2205,16 @@ enum dc_status dc_commit_streams(struct dc *dc,
res = dc_commit_state_no_check(dc, context);
- for (i = 0; i < stream_count; i++) {
+ for (i = 0; i < params->stream_count; i++) {
for (j = 0; j < context->stream_count; j++) {
- if (streams[i]->stream_id == context->streams[j]->stream_id)
- streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+ if (params->streams[i]->stream_id == context->streams[j]->stream_id)
+ params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
- if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
+ if (dc_is_embedded_signal(params->streams[i]->signal)) {
+ struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
if (dc->hwss.is_abm_supported)
- status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
else
status->is_abm_supported = true;
}
@@ -2428,6 +2547,10 @@ static enum surface_update_type get_scaling_info_update_type(
/* Changing clip size of a large surface may result in MPC slice count change */
update_flags->bits.bandwidth_change = 1;
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
+ update_flags->bits.clip_size_change = 1;
+
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2441,7 +2564,8 @@ static enum surface_update_type get_scaling_info_update_type(
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.position_change)
+ if (update_flags->bits.position_change ||
+ update_flags->bits.clip_size_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
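
With the new clip_size_change bit, the triage above becomes: scaling changes force a FULL update, a pure position move or a clip-size-only change is MED, and everything else stays FAST. Sketched as a standalone classifier with illustrative names:

enum upd_type_sketch { UPD_FAST, UPD_MED, UPD_FULL };

struct upd_flags_sketch {
	unsigned scaling_change:1;
	unsigned position_change:1;
	unsigned clip_size_change:1;
};

static enum upd_type_sketch classify_scaling_update(struct upd_flags_sketch f)
{
	if (f.scaling_change)
		return UPD_FULL;	/* ratios or src/dst size changed */
	if (f.position_change || f.clip_size_change)
		return UPD_MED;		/* viewport reprogram is enough */
	return UPD_FAST;
}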
@@ -2734,55 +2858,45 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->layer_index;
}
- if (srf_update->gamma &&
- (surface->gamma_correction !=
- srf_update->gamma)) {
- memcpy(&surface->gamma_correction->entries,
+ if (srf_update->gamma) {
+ memcpy(&surface->gamma_correction.entries,
&srf_update->gamma->entries,
sizeof(struct dc_gamma_entries));
- surface->gamma_correction->is_identity =
+ surface->gamma_correction.is_identity =
srf_update->gamma->is_identity;
- surface->gamma_correction->num_entries =
+ surface->gamma_correction.num_entries =
srf_update->gamma->num_entries;
- surface->gamma_correction->type =
+ surface->gamma_correction.type =
srf_update->gamma->type;
}
- if (srf_update->in_transfer_func &&
- (surface->in_transfer_func !=
- srf_update->in_transfer_func)) {
- surface->in_transfer_func->sdr_ref_white_level =
+ if (srf_update->in_transfer_func) {
+ surface->in_transfer_func.sdr_ref_white_level =
srf_update->in_transfer_func->sdr_ref_white_level;
- surface->in_transfer_func->tf =
+ surface->in_transfer_func.tf =
srf_update->in_transfer_func->tf;
- surface->in_transfer_func->type =
+ surface->in_transfer_func.type =
srf_update->in_transfer_func->type;
- memcpy(&surface->in_transfer_func->tf_pts,
+ memcpy(&surface->in_transfer_func.tf_pts,
&srf_update->in_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->func_shaper &&
- (surface->in_shaper_func !=
- srf_update->func_shaper))
- memcpy(surface->in_shaper_func, srf_update->func_shaper,
- sizeof(*surface->in_shaper_func));
+ if (srf_update->func_shaper)
+ memcpy(&surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(surface->in_shaper_func));
- if (srf_update->lut3d_func &&
- (surface->lut3d_func !=
- srf_update->lut3d_func))
- memcpy(surface->lut3d_func, srf_update->lut3d_func,
- sizeof(*surface->lut3d_func));
+ if (srf_update->lut3d_func)
+ memcpy(&surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(surface->lut3d_func));
if (srf_update->hdr_mult.value)
surface->hdr_mult =
srf_update->hdr_mult;
- if (srf_update->blend_tf &&
- (surface->blend_tf !=
- srf_update->blend_tf))
- memcpy(surface->blend_tf, srf_update->blend_tf,
- sizeof(*surface->blend_tf));
+ if (srf_update->blend_tf)
+ memcpy(&surface->blend_tf, srf_update->blend_tf,
+ sizeof(surface->blend_tf));
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
@@ -2813,14 +2927,13 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dst.height && update->dst.width)
stream->dst = update->dst;
- if (update->out_transfer_func &&
- stream->out_transfer_func != update->out_transfer_func) {
- stream->out_transfer_func->sdr_ref_white_level =
+ if (update->out_transfer_func) {
+ stream->out_transfer_func.sdr_ref_white_level =
update->out_transfer_func->sdr_ref_white_level;
- stream->out_transfer_func->tf = update->out_transfer_func->tf;
- stream->out_transfer_func->type =
+ stream->out_transfer_func.tf = update->out_transfer_func->tf;
+ stream->out_transfer_func.type =
update->out_transfer_func->type;
- memcpy(&stream->out_transfer_func->tf_pts,
+ memcpy(&stream->out_transfer_func.tf_pts,
&update->out_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
@@ -2921,8 +3034,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
-static void backup_plane_states_for_stream(
- struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+static void backup_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
struct dc_stream_state *stream)
{
int i;
@@ -2931,12 +3044,14 @@ static void backup_plane_states_for_stream(
if (!status)
return;
- for (i = 0; i < status->plane_count; i++)
- plane_states[i] = *status->plane_states[i];
+ for (i = 0; i < status->plane_count; i++) {
+ scratch->plane_states[i] = *status->plane_states[i];
+ }
+ scratch->stream_state = *stream;
}
-static void restore_plane_states_for_stream(
- struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+static void restore_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
struct dc_stream_state *stream)
{
int i;
@@ -2945,10 +3060,69 @@ static void restore_plane_states_for_stream(
if (!status)
return;
- for (i = 0; i < status->plane_count; i++)
- *status->plane_states[i] = plane_states[i];
+ for (i = 0; i < status->plane_count; i++) {
+ *status->plane_states[i] = scratch->plane_states[i];
+ }
+ *stream = scratch->stream_state;
}
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ /* The seamless boot optimization flag keeps clocks and watermarks high
+ * until the first flip; after that, optimization is required to lower
+ * bandwidth. Note that UEFI is expected to light up only a single
+ * display on POST, so we only expect one stream with the seamless
+ * boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+/**
+ * update_planes_and_stream_state() - Take plane and stream updates as inputs,
+ * determine the appropriate update type, and act on it. If the update type is
+ * FULL, the function allocates a new context, then populates and validates it.
+ * Otherwise, it updates the current dc context. The function returns both
+ * new_context and new_update_type to the caller. It also backs up both the
+ * current and new contexts into the corresponding dc state scratch memory.
+ * TODO: The function does too many things, and even conditionally allocates dc
+ * context memory implicitly. We should consider breaking it down.
+ *
+ * @dc: Current DC state
+ * @srf_updates: an array of surface updates
+ * @surface_count: surface update count
+ * @stream: Corresponding stream to be updated
+ * @stream_update: stream update
+ * @new_update_type: [out] determined update type by the function
+ * @new_context: [out] new context allocated and validated if update type is
+ * FULL, reference to current context if update type is less than FULL.
+ *
+ * Return: true if a valid update is populated into new_context, false
+ * otherwise.
+ */
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -2972,9 +3146,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
copy_stream_update_to_stream(dc, context, stream, stream_update);
@@ -3043,7 +3218,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_type >= UPDATE_TYPE_MED) {
+ if (update_type != UPDATE_TYPE_MED)
+ continue;
+ if (surface->update_flags.bits.clip_size_change ||
+ surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3060,19 +3238,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
BREAK_TO_DEBUGGER();
goto fail;
}
-
- for (i = 0; i < context->stream_count; i++) {
- struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
- context->streams[i]);
-
- if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
- resource_build_test_pattern_params(&context->res_ctx, otg_master);
- }
}
+ update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
*new_update_type = update_type;
- backup_plane_states_for_stream(context->scratch.plane_states, stream);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
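
For reference, here is a minimal sketch of the caller contract implied by the
kernel-doc above, assuming a hypothetical wrapper named example_commit
(update_planes_and_stream_v3 later in this patch follows the same shape):

	static bool example_commit(struct dc *dc,
			struct dc_surface_update *srf_updates, int surface_count,
			struct dc_stream_state *stream,
			struct dc_stream_update *stream_update)
	{
		struct dc_state *new_context;
		enum surface_update_type update_type;

		if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
				stream, stream_update, &update_type, &new_context))
			return false; /* nothing populated, nothing to release */

		/* ... commit new_context here ... */

		/* on a FULL update new_context is a fresh allocation that must
		 * replace dc->current_state; otherwise it aliases the current
		 * state and no swap is needed
		 */
		if (new_context != dc->current_state)
			swap_and_release_current_context(dc, new_context, stream);
		return true;
	}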
@@ -3161,12 +3333,26 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->pending_test_pattern) {
- dc_link_dp_set_test_pattern(stream->link,
+ /*
+ * Test pattern params depend on ODM topology
+ * changes that we could be applying to the
+ * front end. Since front end changes are not
+ * yet applied at this stage, we can only apply
+ * the test pattern in hw based on the current
+ * state and populate the final test pattern
+ * params in the new state. If the current and
+ * new test pattern params differ as a result of
+ * different ODM topologies being used, this will
+ * be detected and handled during the front end
+ * programming update.
+ */
+ dc->link_srv->dp_set_test_pattern(stream->link,
stream->test_pattern.type,
stream->test_pattern.color_space,
stream->test_pattern.p_link_settings,
stream->test_pattern.p_custom_pattern,
stream->test_pattern.cust_pattern_size);
+ resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
}
if (stream_update->dpms_off) {
@@ -3223,6 +3409,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
@@ -3260,6 +3449,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (srf_updates[i].surface->flip_immediate)
continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
sizeof(flip_addr->dirty_rects));
@@ -3376,6 +3566,9 @@ static void commit_planes_for_stream_fast(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
@@ -3431,7 +3624,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->block_sequence,
&(context->block_sequence_steps),
top_pipe_to_program,
- stream_status);
+ stream_status,
+ context);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -3444,7 +3638,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3482,6 +3676,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -3503,10 +3698,23 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
@@ -3541,17 +3749,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
-
- if (dc->debug.visual_confirm)
- dc_update_visual_confirm_color(dc, context, pipe);
- }
- }
-
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -3957,24 +4154,14 @@ struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
enum pipe_split_policy mpc_policy;
+ char force_odm[MAX_PIPES];
};
-static void release_minimal_transition_state(struct dc *dc,
- struct dc_state *context, struct pipe_split_policy_backup *policy)
-{
- dc_state_release(context);
- /* restore previous pipe split and odm policy */
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = policy->mpc_policy;
- dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
- dc->debug.force_disable_subvp = policy->subvp_policy;
-}
-
-static struct dc_state *create_minimal_transition_state(struct dc *dc,
- struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = NULL;
- unsigned int i, j;
+ int i;
if (!dc->config.is_vmin_only_asic) {
policy->mpc_policy = dc->debug.pipe_split_policy;
@@ -3984,82 +4171,252 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
dc->debug.enable_single_display_2to1_odm_policy = false;
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *minimal_transition_context,
+ struct dc_state *base_context,
+ struct pipe_split_policy_backup *policy)
+{
+ restore_minimal_pipe_split_policy(dc, base_context, policy);
+ dc_state_release(minimal_transition_context);
+}
+
+static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
+{
+ uint8_t i;
+ int j;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < context->stream_count; i++) {
+ stream_status = &context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++)
+ stream_status->plane_states[j]->flip_immediate = false;
+ }
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = NULL;
minimal_transition_context = dc_state_create_copy(base_context);
if (!minimal_transition_context)
return NULL;
+ backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
- for (i = 0; i < minimal_transition_context->stream_count; i++) {
- struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
-
- for (j = 0; j < stream_status->plane_count; j++) {
- struct dc_plane_state *plane_state = stream_status->plane_states[j];
-
- /* force vsync flip when reconfiguring pipes to prevent underflow
- * and corruption
- */
- plane_state->flip_immediate = false;
- }
- }
+ /* prevent underflow and corruption when reconfiguring pipes */
+ force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
- /* this should never happen */
- release_minimal_transition_state(dc, minimal_transition_context, policy);
+ /*
+ * This should never happen; the minimal transition state
+ * should always validate successfully before pipe split
+ * features are added on top of it.
+ */
+ release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
BREAK_TO_DEBUGGER();
minimal_transition_context = NULL;
}
return minimal_transition_context;
}
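
The two helpers are intended as a strict pair: the same base context must be
passed to both so the pipe split and ODM policies backed up on creation are
restored on release. A minimal usage sketch with hypothetical locals (this
mirrors the commit_minimal_transition_state caller further below):

	struct pipe_split_policy_backup policy;
	struct dc_state *mts;

	dc_state_retain(base_context);
	mts = create_minimal_transition_state(dc, base_context, &policy);
	if (mts) {
		/* check seamlessness, then commit mts */
		release_minimal_transition_state(dc, mts, base_context, &policy);
	}
	dc_state_release(base_context);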
-static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *stream)
+static bool is_pipe_topology_transition_seamless_with_intermediate_step(
+ struct dc *dc,
+ struct dc_state *initial_state,
+ struct dc_state *intermediate_state,
+ struct dc_state *final_state)
+{
+ return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
+ intermediate_state) &&
+ dc->hwss.is_pipe_topology_transition_seamless(dc,
+ intermediate_state, final_state);
+}
+
+static void swap_and_release_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_state *old = dc->current_state;
+ struct pipe_ctx *pipe_ctx;
+
+ /* Since memory free requires elevated IRQL, an interrupt
+ * request is generated by the mem free. If this happens
+ * between freeing and reassigning the context, our vsync
+ * interrupt will call into dc and cause memory
+ * corruption. Hence, we first reassign the context,
+ * then free the old context.
+ */
+ dc->current_state = new_context;
+ dc_state_release(old);
+
+ // clear any forced full updates
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+}
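
The assignment order inside this helper is load-bearing; an illustrative
sketch of the hazard it avoids:

	/* WRONG: a vsync interrupt firing between these two statements would
	 * call into dc while dc->current_state still points at freed memory
	 */
	dc_state_release(dc->current_state);
	dc->current_state = new_context;

	/* RIGHT (what the helper does): reassign first, then free */
	old = dc->current_state;
	dc->current_state = new_context;
	dc_state_release(old);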
+
+static int initialize_empty_surface_updates(
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates)
+{
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+ int i;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < status->plane_count; i++)
+ srf_updates[i].surface = status->plane_states[i];
+
+ return status->plane_count;
+}
+
+static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
{
bool success = false;
- struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context =
+ create_minimal_transition_state(dc, new_context,
+ &policy);
- /* commit based on new context */
- /* Since all phantom pipes are removed in full validation,
- * we have to save and restore the subvp/mall config when
- * we do a minimal transition since the flags marking the
- * pipe as subvp/phantom will be cleared (dc copy constructor
- * creates a shallow copy).
- */
- minimal_transition_context = create_minimal_transition_state(dc,
- context, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = new state\n", __func__);
-
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = new state\n");
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
}
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
- }
-
- if (!success) {
- /* commit based on current context */
- restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
- minimal_transition_context = create_minimal_transition_state(dc,
- dc->current_state, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = current state\n", __func__);
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
- }
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
+ release_minimal_transition_state(
+ dc, intermediate_context, new_context, &policy);
+ }
+ return success;
+}
+
+static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context;
+ struct dc_state *old_current_state = dc->current_state;
+ struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ int surface_count;
+
+ /*
+ * Both the current and new contexts share the same stream and plane
+ * state pointers. When the new context is validated, the stream and
+ * planes get populated with new updates such as new plane addresses.
+ * This makes the current context no longer valid because the stream
+ * and planes have been modified from the original. We back up the
+ * current stream and plane states into scratch space whenever we
+ * populate a new context, so we can now restore the original values
+ * by calling the restore function. This brings back the original
+ * stream and plane states associated with the current state.
+ */
+ restore_planes_and_stream_state(&dc->scratch.current_state, stream);
+ dc_state_retain(old_current_state);
+ intermediate_context = create_minimal_transition_state(dc,
+ old_current_state, &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = current state\n");
+ surface_count = initialize_empty_surface_updates(
+ stream, srf_updates);
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
}
- restore_plane_states_for_stream(context->scratch.plane_states, stream);
+ release_minimal_transition_state(dc, intermediate_context,
+ old_current_state, &policy);
}
+ dc_state_release(old_current_state);
+ /*
+ * Restore stream and plane states back to the values associated with
+ * new context.
+ */
+ restore_planes_and_stream_state(&dc->scratch.new_state, stream);
+ return success;
+}
- ASSERT(success);
+/**
+ * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
+ * on current or new context
+ *
+ * @dc: DC structure, used to get the current state
+ * @new_context: New context
+ * @stream: Stream getting the update for the flip
+ * @srf_updates: Surface updates
+ * @surface_count: Number of surfaces
+ *
+ * The function takes in the current and new states and determines a minimal
+ * transition state to serve as the intermediate step that could make the
+ * transition between the current and new states seamless. If one is found,
+ * it commits the minimal transition state, updates the current state to this
+ * minimal transition state and returns true; if not, it returns false.
+ *
+ * Return:
+ * True if the minimal transition succeeded, false otherwise
+ */
+static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = commit_minimal_transition_based_on_new_context(
+ dc, new_context, stream, srf_updates,
+ surface_count);
+ if (!success)
+ success = commit_minimal_transition_based_on_current_context(dc,
+ new_context, stream);
+ if (!success)
+ DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
return success;
}
@@ -4142,12 +4499,14 @@ static bool commit_minimal_transition_state(struct dc *dc,
dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
"Unknown");
+ dc_state_retain(transition_base_context);
transition_context = create_minimal_transition_state(dc,
transition_base_context, &policy);
if (transition_context) {
ret = dc_commit_state_no_check(dc, transition_context);
- release_minimal_transition_state(dc, transition_context, &policy);
+ release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
}
+ dc_state_release(transition_base_context);
if (ret != DC_OK) {
/* this should never happen */
@@ -4165,41 +4524,6 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
-/**
- * update_seamless_boot_flags() - Helper function for updating seamless boot flags
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @surface_count: Number of surfaces that have an updated
- * @stream: Corresponding stream to be updated in the current flip
- *
- * Updating seamless boot flags do not need to be part of the commit sequence. This
- * helper function will update the seamless boot flags on each flip (if required)
- * outside of the HW commit sequence (fast or slow).
- *
- * Return: void
- */
-static void update_seamless_boot_flags(struct dc *dc,
- struct dc_state *context,
- int surface_count,
- struct dc_stream_state *stream)
-{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
- /* Optimize seamless boot flag keeps clocks and watermarks high until
- * first flip. After first flip, optimization is required to lower
- * bandwidth. Important to note that it is expected UEFI will
- * only light up a single display on POST, therefore we only expect
- * one stream with seamless boot flag set.
- */
- if (stream->apply_seamless_boot_optimization) {
- stream->apply_seamless_boot_optimization = false;
-
- if (get_seamless_boot_stream_count(context) == 0)
- dc->optimized_required = true;
- }
- }
-}
-
static void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4319,61 +4643,131 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
+static bool update_planes_and_stream_v1(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
- struct dc_state *context)
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
{
- struct pipe_ctx *cur_pipe, *new_pipe;
- bool cur_is_odm_in_use, new_is_odm_in_use;
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
- struct dc_stream_status *new_stream_status = stream_get_status(context, stream);
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
- if (!dc->debug.enable_single_display_2to1_odm_policy ||
- !dc->config.enable_windowed_mpo_odm)
- /* skip the check if windowed MPO ODM or dynamic ODM is turned
- * off.
- */
- return false;
+ dc_exit_ips_for_hw_access(dc);
- if (context == dc->current_state)
- /* skip the check for fast update */
- return false;
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
- if (new_stream_status->plane_count != cur_stream_status->plane_count)
- /* plane count changed, not a plane scaling update so not the
- * case we are looking for
- */
- return false;
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
- cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
- new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
- if (!cur_pipe || !new_pipe)
- return false;
- cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
- new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
- if (cur_is_odm_in_use == new_is_odm_in_use)
- /* ODM state isn't changed, not the case we are looking for */
- return false;
+ if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->hwss.is_pipe_topology_transition_seamless &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context))
- /* transition can be achieved without the need for committing
- * minimal transition state first
+ /* initialize scratch memory for building context */
+ context = dc_state_create_copy(state);
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
+ } else if (update_type == UPDATE_TYPE_FAST) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
*/
- return false;
+ dc_post_update_surfaces_to_stream(dc);
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ copy_surface_update_to_plane(surface, &srf_updates[i]);
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_state_release(context);
+ return false;
+ }
+ }
+
+ TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ /* update current_state */
+ if (dc->current_state != context) {
+
+ struct dc_state *old = dc->current_state;
+ dc->current_state = context;
+ dc_state_release(old);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+ }
+
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ }
return true;
}
-bool dc_update_planes_and_stream(struct dc *dc,
+static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
enum surface_update_type update_type;
- int i;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
@@ -4397,7 +4791,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
!commit_minimal_transition_state(dc, dc->current_state))
- return false;
+ return false;
if (!update_planes_and_stream_state(
dc,
@@ -4411,12 +4805,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
- /* Since all phantom pipes are removed in full validation,
- * we have to save and restore the subvp/mall config when
- * we do a minimal transition since the flags marking the
- * pipe as subvp/phantom will be cleared (dc copy constructor
- * creates a shallow copy).
- */
if (!commit_minimal_transition_state(dc, context)) {
dc_state_release(context);
return false;
@@ -4424,20 +4812,12 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type = UPDATE_TYPE_FULL;
}
- /* when windowed MPO ODM is supported, we need to handle a special case
- * where we can transition between ODM combine and MPC combine due to
- * plane scaling update. This transition will require us to commit
- * minimal transition state. The condition to trigger this update can't
- * be predicted by could_mpcc_tree_change_for_active_pipes because we
- * can only determine it after DML validation. Therefore we can't rely
- * on the existing commit minimal transition state sequence. Instead
- * we have to add additional handling here to handle this transition
- * with its own special sequence.
- */
- if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context))
- commit_minimal_transition_state_for_windowed_mpo_odm(dc,
- context, stream);
- update_seamless_boot_flags(dc, context, surface_count, stream);
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ commit_minimal_transition_state_in_dc_update(dc, context, stream,
+ srf_updates, surface_count);
+
if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
@@ -4463,141 +4843,33 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type,
context);
}
-
- if (dc->current_state != context) {
-
- /* Since memory free requires elevated IRQL, an interrupt
- * request is generated by mem free. If this happens
- * between freeing and reassigning the context, our vsync
- * interrupt will call into dc and cause a memory
- * corruption BSOD. Hence, we first reassign the context,
- * then free the old context.
- */
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- // clear any forced full updates
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
+ if (dc->current_state != context)
+ swap_and_release_current_context(dc, context, stream);
return true;
}
-void dc_commit_updates_for_stream(struct dc *dc,
- struct dc_surface_update *srf_updates,
- int surface_count,
+static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
- struct dc_state *state)
+ enum surface_update_type update_type)
{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
-
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs.
- */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
-
- dc_update_planes_and_stream(dc, srf_updates,
- surface_count, stream,
- stream_update);
- return;
- }
-
- if (update_type >= update_surface_trace_level)
- update_surface_trace(dc, srf_updates, surface_count);
-
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
+ ASSERT(update_type < UPDATE_TYPE_FULL);
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
- context);
- } else {
+ dc->current_state);
+ else
commit_planes_for_stream(
dc,
srf_updates,
@@ -4605,32 +4877,133 @@ void dc_commit_updates_for_stream(struct dc *dc,
stream,
stream_update,
update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
+ dc->current_state);
+}
- struct dc_state *old = dc->current_state;
+static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *new_context)
+{
+ ASSERT(update_type >= UPDATE_TYPE_FULL);
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
+ dc->current_state, new_context))
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes such as
+ * dynamic ODM or SubVp shall only be enabled when they can be
+ * transitioned seamlessly to AND from their minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled, so it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we will insert the
+ * minimal transition state A' or B' in between so a seamless
+ * transition between A and B can be made possible.
+ */
+ commit_minimal_transition_state_in_dc_update(dc, new_context,
+ stream, srf_updates, surface_count);
- dc->current_state = context;
- dc_state_release(old);
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ new_context);
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+static bool update_planes_and_stream_v3(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *new_context;
+ enum surface_update_type update_type;
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
+ /*
+ * When this function returns true and new_context is not equal to
+ * the current state, it has allocated and validated a new dc state
+ * and assigned it to new_context. The caller is then responsible for
+ * freeing this memory when new_context is no longer used. We swap the
+ * current context with the new one and free the current one instead,
+ * so new_context's memory lives until the next full update, when it
+ * is replaced by a newer context. Refer to the use of
+ * swap_and_release_current_context below.
+ */
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
+ &new_context))
+ return false;
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ if (new_context == dc->current_state) {
+ commit_planes_and_stream_update_on_current_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type);
+ } else {
+ commit_planes_and_stream_update_with_new_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
}
- return;
+ return true;
+}
+bool dc_update_planes_and_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /*
+ * Update planes and stream version 3 separates FULL and FAST updates
+ * into their own sequences. It aims to clean up the frequent checks
+ * for update type that result in unnecessary branching in the logic
+ * flow. It also adds a new commit minimal transition sequence, which
+ * detects the need for a minimal transition based on the actual
+ * comparison of current and new states instead of "predicting" it
+ * from per-feature software policy, i.e.
+ * could_mpcc_tree_change_for_active_pipes. The new commit minimal
+ * transition sequence is made universal to any power saving features
+ * that would use extra free pipes, such as Dynamic ODM/MPC Combine,
+ * MPO or SubVp. Therefore there is no longer a need to specially
+ * handle compatibility problems with transitions among those features
+ * as they are now transparent to the new sequence.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ return update_planes_and_stream_v3(dc, srf_updates,
+ surface_count, stream, stream_update);
+ return update_planes_and_stream_v2(dc, srf_updates,
+ surface_count, stream, stream_update);
+}
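
A minimal single-plane flip through this unified entry point might look as
follows; plane_state and stream are hypothetical handles and only the
.surface member of the update is shown:

	struct dc_surface_update srf_update = {0};

	srf_update.surface = plane_state;

	/* NULL stream_update means a plane-only (flip-style) update; the
	 * dispatch above selects v3 or v2 from the ASIC's DCN version
	 */
	dc_update_planes_and_stream(dc, &srf_update, 1, stream, NULL);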
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /* TODO: Since change commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -4673,8 +5046,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
void dc_power_down_on_boot(struct dc *dc)
{
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
- dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot) {
+
+ if (dc->caps.ips_support)
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.power_down_on_boot(dc);
+ }
}
void dc_set_power_state(
@@ -4688,6 +5066,8 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_state_construct(dc, dc->current_state);
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
dc->hwss.init_hw(dc);
@@ -4810,11 +5190,15 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active)
return true;
}
-void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (allow != dc->idle_optimizations_allowed)
+ DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
+ dc->idle_optimizations_allowed, allow, caller_name);
+
if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return;
@@ -4829,24 +5213,24 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
-bool dc_dmub_is_ips_idle_state(struct dc *dc)
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
{
- uint32_t idle_state = 0;
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations_internal(dc, false, caller_name);
+}
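
The new _internal suffix plus caller_name parameter implies thin wrapper
macros in the header that forward __func__ into the IPS log; presumably
something like the following (assumed shape, not shown in this hunk):

	#define dc_allow_idle_optimizations(dc, allow) \
		dc_allow_idle_optimizations_internal(dc, allow, __func__)

	#define dc_exit_ips_for_hw_access(dc) \
		dc_exit_ips_for_hw_access_internal(dc, __func__)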
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
@@ -4966,10 +5350,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
}
dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr)
{
- if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 9c05b1a07142..5c1d3017aefd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -392,10 +392,10 @@ void get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
@@ -403,10 +403,10 @@ void get_hdr_visual_confirm_color(
is_sdr = true;
break;
case PIXEL_FORMAT_FP16:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
} else
@@ -558,9 +558,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status)
+ struct dc_stream_status *stream_status,
+ struct dc_state *context)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9fbdb09697fd..15819416a2f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -73,6 +73,7 @@
#include "dcn32/dcn32_resource.h"
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
#define VISUAL_CONFIRM_BASE_DEFAULT 3
#define VISUAL_CONFIRM_BASE_MIN 1
@@ -195,6 +196,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
break;
case AMDGPU_FAMILY_GC_11_5_0:
dc_version = DCN_VERSION_3_5;
+ if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_51;
break;
default:
dc_version = DCE_VERSION_UNKNOWN;
@@ -303,6 +306,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_5:
res_pool = dcn35_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_3_51:
+ res_pool = dcn351_create_resource_pool(init_data, dc);
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
@@ -334,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
return res_pool;
}
-void dc_destroy_resource_pool(struct dc *dc)
+void dc_destroy_resource_pool(struct dc *dc)
{
if (dc) {
if (dc->res_pool)
@@ -1451,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
controller_color_space = convert_dp_to_controller_color_space(
otg_master->stream->test_pattern.color_space);
+ if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE)
+ return;
+
odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
odm_slice_width = h_active / odm_cnt;
@@ -1479,6 +1488,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
+
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
@@ -1490,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return false;
}
- pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
- pipe_ctx->plane_state->format);
-
/* Timing borders are part of vactive that we are also supposed to skip in addition
* to any stream dst offset. Since dm logic assumes dst is in addressable
* space we need to add the left and top borders to dst offsets temporarily.
@@ -1504,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
/* depends on h_active */
calculate_recout(pipe_ctx);
@@ -1788,6 +1797,30 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
return free_pipe_idx;
}
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+ !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
+ }
+
+ return free_pipe_idx;
+}
+
int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
@@ -1834,23 +1867,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
{
-#ifdef DBG
- if (pipe_ctx->stream == NULL) {
- /* a free pipe with dangling states */
- ASSERT(!pipe_ctx->plane_state);
- ASSERT(!pipe_ctx->prev_odm_pipe);
- ASSERT(!pipe_ctx->next_odm_pipe);
- ASSERT(!pipe_ctx->top_pipe);
- ASSERT(!pipe_ctx->bottom_pipe);
- } else if (pipe_ctx->top_pipe) {
- /* a secondary DPP pipe must be signed to a plane */
- ASSERT(pipe_ctx->plane_state)
- }
- /* Add more checks here to prevent corrupted pipe ctx. It is very hard
- * to debug this issue afterwards because we can't pinpoint the code
- * location causing inconsistent pipe context states.
- */
-#endif
switch (type) {
case OTG_MASTER:
return !pipe_ctx->prev_odm_pipe &&
@@ -2179,50 +2195,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
}
}
-void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
+ struct pipe_ctx *otg_master, int stream_idx)
{
- struct pipe_ctx *otg_master;
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
- int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
+ int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
bool is_primary;
DC_LOGGER_INIT(dc->ctx->logger);
+ slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+ for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ plane_idx = -1;
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &state->res_ctx,
+ dpp_pipes);
+ for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+ is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
+ dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
+ if (is_primary)
+ plane_idx++;
+ resource_log_pipe(dc, dpp_pipes[dpp_idx],
+ stream_idx, slice_idx,
+ plane_idx, slice_count,
+ is_primary);
+ }
+ } else {
+ resource_log_pipe(dc, opp_heads[slice_idx],
+ stream_idx, slice_idx, plane_idx,
+ slice_count, true);
+ }
+
+ }
+}
+
+static int resource_stream_to_stream_idx(struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i, stream_idx = -1;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_idx = i;
+ break;
+ }
+
+ /* never return negative array index */
+ if (stream_idx == -1) {
+ ASSERT(0);
+ return 0;
+ }
+
+ return stream_idx;
+}
+
+void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+{
+ struct pipe_ctx *otg_master;
+ int stream_idx, phantom_stream_idx;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->streams[stream_idx]->is_phantom)
+ continue;
+
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
- if (!otg_master || otg_master->stream_res.tg == NULL) {
- DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
- return;
- }
- slice_count = resource_get_opp_heads_for_otg_master(otg_master,
- &state->res_ctx, opp_heads);
- for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
- plane_idx = -1;
- if (opp_heads[slice_idx]->plane_state) {
- dpp_count = resource_get_dpp_pipes_for_opp_head(
- opp_heads[slice_idx],
- &state->res_ctx,
- dpp_pipes);
- for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
- is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
- dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
- if (is_primary)
- plane_idx++;
- resource_log_pipe(dc, dpp_pipes[dpp_idx],
- stream_idx, slice_idx,
- plane_idx, slice_count,
- is_primary);
- }
- } else {
- resource_log_pipe(dc, opp_heads[slice_idx],
- stream_idx, slice_idx, plane_idx,
- slice_count, true);
- }
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ }
+ if (state->phantom_stream_count > 0) {
+ DC_LOG_DC(" | (phantom pipes) |");
+ for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
+ continue;
+ phantom_stream_idx = resource_stream_to_stream_idx(state,
+ state->stream_status[stream_idx].mall_stream_config.paired_stream);
+ otg_master = resource_get_otg_master_for_stream(
+ &state->res_ctx, state->streams[phantom_stream_idx]);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2277,6 +2334,9 @@ static bool update_pipe_params_after_odm_slice_count_change(
if (pool->funcs->build_pipe_pix_clk_params)
pool->funcs->build_pipe_pix_clk_params(otg_master);
+
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+
return result;
}
@@ -2635,13 +2695,19 @@ bool resource_append_dpp_pipes_for_plane_composition(
struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state)
{
+ bool success;
if (otg_master_pipe->plane_state == NULL)
- return add_plane_to_opp_head_pipes(otg_master_pipe,
+ success = add_plane_to_opp_head_pipes(otg_master_pipe,
plane_state, new_ctx);
else
- return acquire_secondary_dpp_pipes_and_add_plane(
+ success = acquire_secondary_dpp_pipes_and_add_plane(
otg_master_pipe, plane_state, new_ctx,
cur_ctx, pool);
+ if (success)
+ /* when appending a plane, the mpc slice count changes from 0 to 1 */
+ success = update_pipe_params_after_mpc_slice_count_change(
+ plane_state, new_ctx, pool);
+ return success;
}
void resource_remove_dpp_pipes_for_plane_composition(
@@ -2976,7 +3042,7 @@ bool resource_update_pipes_for_plane_with_slice_count(
int i;
int dpp_pipe_count;
int cur_slice_count;
- struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
bool result = true;
dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane,
@@ -3128,6 +3194,9 @@ static struct audio *find_first_free_audio(
{
int i, available_audio_count;
+ if (id == ENGINE_ID_UNKNOWN)
+ return NULL;
+
available_audio_count = pool->audio_count;
for (i = 0; i < available_audio_count; i++) {
@@ -3382,11 +3451,31 @@ static bool acquire_otg_master_pipe_for_stream(
* any free pipes already used in current context as this could tear
* down exiting ODM/MPC/MPO configuration unnecessarily.
*/
+
+ /*
+ * Try to acquire the same OTG master already in use. This is not
+ * optimal because resetting an enabled OTG master pipe for a new stream
+ * requires an extra frame of wait. However, there are test automation
+ * and eDP assumptions that rely on reusing the same OTG master pipe
+ * during mode change. We have to keep this logic as is for now.
+ */
pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * Try to acquire a pipe not used in current resource context to avoid
+ * pipe swapping.
+ */
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * If pipe swapping is unavoidable, try to acquire pipe used as
+ * secondary DPP pipe in current state as we prioritize to support more
+ * streams over supporting MPO planes.
+ */
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
@@ -4001,7 +4090,7 @@ static void set_avi_info_frame(
}
if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
}
@@ -5003,3 +5092,39 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
return false;
}
+
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
+{
+ dml2_options->callbacks.dc = dc;
+ dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params;
+ dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
+ dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
+ dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
+ dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
+ dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count;
+ dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
+ dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count;
+ dml2_options->callbacks.get_opp_head = &resource_get_opp_head;
+ dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream;
+ dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master;
+ dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane;
+ dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
+ dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
+
+ dml2_options->svp_pstate.callbacks.dc = dc;
+ dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
+ dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
+ dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
+ dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
+}
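
A sketch of how a per-ASIC resource file could consume this helper,
overriding only what differs; the function name is hypothetical:

	static void example_init_dml2_options(struct dc *dc)
	{
		struct dml2_configuration_options *dml2_opt = &dc->dml2_options;

		resource_init_common_dml2_callbacks(dc, dml2_opt);
		/* ASIC-specific fields (e.g. SubVP enablement or minimum
		 * pipe counts) would be set here before DML2 is created
		 * against these options
		 */
	}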
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 5f6392ae31a6..cd6570a1e20e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
- notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c2..76bb05f4d6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -188,8 +188,11 @@ static void init_state(struct dc *dc, struct dc_state *state)
}
/* Public dc_state functions */
-struct dc_state *dc_state_create(struct dc *dc)
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_options;
+#endif
struct dc_state *state = kvzalloc(sizeof(struct dc_state),
GFP_KERNEL);
@@ -198,10 +201,16 @@ struct dc_state *dc_state_create(struct dc *dc)
init_state(dc, state);
dc_state_construct(dc, state);
+ state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC;
#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2)
- dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+ if (dc->debug.using_dml2) {
+ dml2_opt->use_clock_dc_limits = false;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
+
+ dml2_opt->use_clock_dc_limits = true;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
+ }
#endif
kref_init(&state->refcount);
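
A minimal usage sketch of the new creation path; dc_state_create_params and dc_power_source_type are added to dc_types.h later in this patch:

	struct dc_state_create_params params = {
		.power_source = DC_POWER_SOURCE_DC, /* validate against battery limits */
	};
	struct dc_state *state = dc_state_create(dc, &params);

	/* passing NULL for params keeps the old behaviour: DC_POWER_SOURCE_AC */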
@@ -214,6 +223,7 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
struct kref refcount = dst_state->refcount;
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+ struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source;
#endif
dc_state_copy_internal(dst_state, src_state);
@@ -222,6 +232,10 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
dst_state->bw_ctx.dml2 = dst_dml2;
if (src_state->bw_ctx.dml2)
dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+
+ dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source;
+ if (src_state->bw_ctx.dml2_dc_power_source)
+ dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source);
#endif
/* context refcount should not be overridden */
@@ -245,6 +259,12 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_release(new_state);
return NULL;
}
+
+ if (src_state->bw_ctx.dml2_dc_power_source &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
#endif
kref_init(&new_state->refcount);
@@ -310,7 +330,6 @@ void dc_state_destruct(struct dc_state *state)
memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
state->dmub_cmd_count = 0;
memset(&state->perf_params, 0, sizeof(state->perf_params));
- memset(&state->scratch, 0, sizeof(state->scratch));
}
void dc_state_retain(struct dc_state *state)
@@ -327,6 +346,9 @@ static void dc_state_free(struct kref *kref)
#ifdef CONFIG_DRM_AMD_DC_FP
dml2_destroy(state->bw_ctx.dml2);
state->bw_ctx.dml2 = 0;
+
+ dml2_destroy(state->bw_ctx.dml2_dc_power_source);
+ state->bw_ctx.dml2_dc_power_source = 0;
#endif
kvfree(state);
@@ -334,13 +356,14 @@ static void dc_state_free(struct kref *kref)
void dc_state_release(struct dc_state *state)
{
- kref_put(&state->refcount, dc_state_free);
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
}
/*
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_state_add_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -369,7 +392,7 @@ enum dc_status dc_state_add_stream(
* dc_state_remove_stream() - Remove a stream from a dc_state.
*/
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -435,6 +458,15 @@ bool dc_state_add_plane(
goto out;
}
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes.
+ * Reset the ODM slice count back to 1 once all planes have been
+ * removed, to maximize the number of planes supported when new
+ * planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)
@@ -585,7 +617,7 @@ bool dc_state_add_all_planes_for_stream(
*/
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream)
+ const struct dc_stream_state *stream)
{
uint8_t i;
@@ -679,7 +711,7 @@ void dc_state_release_phantom_stream(const struct dc *dc,
dc_stream_release(phantom_stream);
}
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane)
{
@@ -715,7 +747,7 @@ void dc_state_release_phantom_plane(const struct dc *dc,
}
/* add phantom streams to context and generate correct meta inside dc_state */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream)
@@ -741,7 +773,7 @@ enum dc_status dc_state_add_phantom_stream(struct dc *dc,
return res;
}
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream)
{
@@ -835,7 +867,7 @@ bool dc_state_add_all_phantom_planes_for_stream(
}
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -857,7 +889,7 @@ bool dc_state_remove_phantom_streams_and_planes(
}
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -868,3 +900,19 @@ void dc_state_release_phantom_streams_and_planes(
for (i = 0; i < state->phantom_plane_count; i++)
dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
}
+
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
+{
+ struct dc_stream_state *stream = NULL;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] && state->streams[i]->stream_id == id) {
+ stream = state->streams[i];
+ break;
+ }
+ }
+
+ return stream;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 54670e0b1518..5c7e4884cac2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -116,12 +116,7 @@ bool dc_stream_construct(struct dc_stream_state *stream,
update_stream_signal(stream, dc_sink_data);
- stream->out_transfer_func = dc_create_transfer_func();
- if (stream->out_transfer_func == NULL) {
- dc_sink_release(dc_sink_data);
- return false;
- }
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func.type = TF_TYPE_BYPASS;
dc_stream_assign_stream_id(stream);
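
With out_transfer_func embedded rather than heap-allocated, construction can no longer fail on this allocation, and the retain/release calls removed below become unnecessary. The access pattern changes from pointer to member; a before/after sketch (callers that still need a pointer, like the writeback path further down, now take its address):

	/* before: allocated, refcounted, could fail */
	stream->out_transfer_func = dc_create_transfer_func();
	stream->out_transfer_func->type = TF_TYPE_BYPASS;

	/* after: embedded member, no allocation */
	stream->out_transfer_func.type = TF_TYPE_BYPASS;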
@@ -131,10 +126,6 @@ bool dc_stream_construct(struct dc_stream_state *stream,
void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
- if (stream->out_transfer_func != NULL) {
- dc_transfer_func_release(stream->out_transfer_func);
- stream->out_transfer_func = NULL;
- }
}
void dc_stream_assign_stream_id(struct dc_stream_state *stream)
@@ -201,9 +192,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->sink)
dc_sink_retain(new_stream->sink);
- if (new_stream->out_transfer_func)
- dc_transfer_func_retain(new_stream->out_transfer_func);
-
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
@@ -319,7 +307,7 @@ bool dc_stream_set_cursor_attributes(
program_cursor_attributes(dc, stream, attributes);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -394,7 +382,7 @@ bool dc_stream_set_cursor_position(
program_cursor_position(dc, stream, position);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -423,7 +411,9 @@ bool dc_stream_add_writeback(struct dc *dc,
return false;
}
- wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
+ dc_exit_ips_for_hw_access(dc);
+
+ wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
dwb->dwb_is_drc = false;
@@ -493,6 +483,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
if (dwb->funcs->set_fc_enable)
dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
@@ -503,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ unsigned int i, j;
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
@@ -542,6 +534,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
/* disable writeback */
if (dc->hwss.disable_writeback) {
struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
@@ -557,6 +551,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info)
{
+ dc_exit_ips_for_hw_access(dc);
+
if (dc->hwss.mmhubbub_warmup)
return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
else
@@ -569,6 +565,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -597,6 +595,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -628,6 +628,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -664,6 +666,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
+ dc_exit_ips_for_hw_access(dc);
+
return dc->hwss.dmdata_status_done(pipe);
}
@@ -698,6 +702,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 19a2c7140ae8..067f6555cfdf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -41,25 +41,15 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
{
plane_state->ctx = ctx;
- plane_state->gamma_correction = dc_create_gamma();
- if (plane_state->gamma_correction != NULL)
- plane_state->gamma_correction->is_identity = true;
+ plane_state->gamma_correction.is_identity = true;
- plane_state->in_transfer_func = dc_create_transfer_func();
- if (plane_state->in_transfer_func != NULL) {
- plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- }
- plane_state->in_shaper_func = dc_create_transfer_func();
- if (plane_state->in_shaper_func != NULL) {
- plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
- }
+ plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
- plane_state->lut3d_func = dc_create_3dlut_func();
+ plane_state->in_shaper_func.type = TF_TYPE_BYPASS;
- plane_state->blend_tf = dc_create_transfer_func();
- if (plane_state->blend_tf != NULL) {
- plane_state->blend_tf->type = TF_TYPE_BYPASS;
- }
+ plane_state->lut3d_func.state.raw = 0;
+
+ plane_state->blend_tf.type = TF_TYPE_BYPASS;
plane_state->pre_multiplied_alpha = true;
@@ -67,30 +57,27 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
void dc_plane_destruct(struct dc_plane_state *plane_state)
{
- if (plane_state->gamma_correction != NULL) {
- dc_gamma_release(&plane_state->gamma_correction);
- }
- if (plane_state->in_transfer_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_transfer_func);
- plane_state->in_transfer_func = NULL;
- }
- if (plane_state->in_shaper_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_shaper_func);
- plane_state->in_shaper_func = NULL;
- }
- if (plane_state->lut3d_func != NULL) {
- dc_3dlut_func_release(
- plane_state->lut3d_func);
- plane_state->lut3d_func = NULL;
- }
- if (plane_state->blend_tf != NULL) {
- dc_transfer_func_release(
- plane_state->blend_tf);
- plane_state->blend_tf = NULL;
+ /* no more pointers to free within dc_plane_state */
+}
+
+
+/* dc_state is passed in separately since it may differ from the current dc
+ * state accessible from plane_state, e.g. when the driver is updating from an
+ * old context to a new one and the caller wants the pipe mask for the new
+ * context rather than the existing one.
+ */
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state)
+{
+ uint8_t pipe_mask = 0;
+ int i;
+
+ for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp)
+ pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst;
}
+ return pipe_mask;
}
/*******************************************************************************
@@ -103,7 +90,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
/*register_flip_interrupt(surface);*/
}
-struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
GFP_KERNEL);
@@ -161,6 +148,8 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c9317ea0258e..3c33c3bcbe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,6 +44,8 @@
#include "dml2/dml2_wrapper.h"
+#include "dmub/inc/dmub_cmd.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -51,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.266"
+#define DC_VER "3.2.281"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -219,6 +221,7 @@ struct dc_dmub_caps {
bool mclk_sw;
bool subvp_psr;
bool gecc_enable;
+ uint8_t fams_ver;
};
struct dc_caps {
@@ -306,12 +309,12 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
- //These bitfields to be used starting with DCN
+ //These bitfields to be used starting with DCN 3.0
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
- uint32_t dcc_256_128_128 : 1; //available starting with DCN
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
} dcc_controls;
};
@@ -429,12 +432,15 @@ struct dc_config {
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
- bool use_old_fixed_vs_sequence;
bool dc_mode_clk_limit_support;
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool usb4_bw_alloc_support;
+ bool allow_0_dtb_clk;
+ bool use_assr_psp_message;
+ bool support_edp0_on_dp1;
};
enum visual_confirm {
@@ -693,6 +699,8 @@ enum pg_hw_pipe_resources {
PG_MPCC,
PG_OPP,
PG_OPTC,
+ PG_DPSTREAM,
+ PG_HDMISTREAM,
PG_HW_PIPE_RESOURCES_NUM_ELEMENT
};
@@ -987,12 +995,17 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool optimize_ips_handshake;
+ bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
+ bool enable_idle_reg_checks;
unsigned int static_screen_wait_frames;
+ bool force_chroma_subsampling_1tap;
+ bool disable_422_left_edge_pixel;
+ unsigned int force_cositing;
};
-struct gpu_info_soc_bounding_box_v1_0;
/* Generic structure that can be used to query properties of DC. More fields
* can be added as required.
@@ -1001,75 +1014,6 @@ struct dc_current_properties {
unsigned int cursor_size_limit;
};
-struct dc {
- struct dc_debug_options debug;
- struct dc_versions versions;
- struct dc_caps caps;
- struct dc_cap_funcs cap_funcs;
- struct dc_config config;
- struct dc_bounding_box_overrides bb_overrides;
- struct dc_bug_wa work_arounds;
- struct dc_context *ctx;
- struct dc_phy_addr_space_config vm_pa_config;
-
- uint8_t link_count;
- struct dc_link *links[MAX_PIPES * 2];
- struct link_service *link_srv;
-
- struct dc_state *current_state;
- struct resource_pool *res_pool;
-
- struct clk_mgr *clk_mgr;
-
- /* Display Engine Clock levels */
- struct dm_pp_clock_levels sclk_lvls;
-
- /* Inputs into BW and WM calculations. */
- struct bw_calcs_dceip *bw_dceip;
- struct bw_calcs_vbios *bw_vbios;
- struct dcn_soc_bounding_box *dcn_soc;
- struct dcn_ip_params *dcn_ip;
- struct display_mode_lib dml;
-
- /* HW functions */
- struct hw_sequencer_funcs hwss;
- struct dce_hwseq *hwseq;
-
- /* Require to optimize clocks and bandwidth for added/removed planes */
- bool optimized_required;
- bool wm_optimized_required;
- bool idle_optimizations_allowed;
- bool enable_c20_dtm_b0;
-
- /* Require to maintain clocks and bandwidth for UEFI enabled HW */
-
- /* FBC compressor */
- struct compressor *fbc_compressor;
-
- struct dc_debug_data debug_data;
- struct dpcd_vendor_signature vendor_signature;
-
- const char *build_id;
- struct vm_helper *vm_helper;
-
- uint32_t *dcn_reg_offsets;
- uint32_t *nbio_reg_offsets;
- uint32_t *clk_reg_offsets;
-
- /* Scratch memory */
- struct {
- struct {
- /*
- * For matching clock_limits table in driver with table
- * from PMFW.
- */
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- } update_bw_bounding_box;
- } scratch;
-
- struct dml2_configuration_options dml2_options;
-};
-
enum frame_buffer_mode {
FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
FRAME_BUFFER_MODE_ZFB_ONLY,
@@ -1249,6 +1193,7 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
+ uint32_t clip_size_change: 1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@@ -1273,6 +1218,8 @@ union surface_update_flags {
uint32_t raw;
};
+#define DC_REMOVE_PLANE_POINTERS 1
+
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
@@ -1287,8 +1234,8 @@ struct dc_plane_state {
struct dc_plane_dcc_param dcc;
- struct dc_gamma *gamma_correction;
- struct dc_transfer_func *in_transfer_func;
+ struct dc_gamma gamma_correction;
+ struct dc_transfer_func in_transfer_func;
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
@@ -1300,9 +1247,9 @@ struct dc_plane_state {
enum dc_color_space color_space;
- struct dc_3dlut *lut3d_func;
- struct dc_transfer_func *in_shaper_func;
- struct dc_transfer_func *blend_tf;
+ struct dc_3dlut lut3d_func;
+ struct dc_transfer_func in_shaper_func;
+ struct dc_transfer_func blend_tf;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
@@ -1338,6 +1285,7 @@ struct dc_plane_state {
struct tg_color visual_confirm_color;
bool is_statically_allocated;
+ enum chroma_cositing cositing;
};
struct dc_plane_info {
@@ -1356,6 +1304,96 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
+ enum chroma_cositing cositing;
+};
+
+#include "dc_stream.h"
+
+struct dc_scratch_space {
+ /* used to temporarily backup plane states of a stream during
+ * dc update. The reason is that plane states are overwritten
+ * with surface updates in dc update. Once they are overwritten
+ * current state is no longer valid. We want to temporarily
+ * store current value in plane states so we can still recover
+ * a valid current state during dc update.
+ */
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+
+ struct dc_stream_state stream_state;
+};
+
+struct dc {
+ struct dc_debug_options debug;
+ struct dc_versions versions;
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_config config;
+ struct dc_bounding_box_overrides bb_overrides;
+ struct dc_bug_wa work_arounds;
+ struct dc_context *ctx;
+ struct dc_phy_addr_space_config vm_pa_config;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_LINKS];
+ struct link_service *link_srv;
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ struct clk_mgr *clk_mgr;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* Require to optimize clocks and bandwidth for added/removed planes */
+ bool optimized_required;
+ bool wm_optimized_required;
+ bool idle_optimizations_allowed;
+ bool enable_c20_dtm_b0;
+
+ /* Require to maintain clocks and bandwidth for UEFI enabled HW */
+
+ /* FBC compressor */
+ struct compressor *fbc_compressor;
+
+ struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
+
+ const char *build_id;
+ struct vm_helper *vm_helper;
+
+ uint32_t *dcn_reg_offsets;
+ uint32_t *nbio_reg_offsets;
+ uint32_t *clk_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ struct dc_scratch_space current_state;
+ struct dc_scratch_space new_state;
+ struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ } scratch;
+
+ struct dml2_configuration_options dml2_options;
+ enum dc_acpi_cm_power_state power_state;
+
};
struct dc_scaling_info {
@@ -1472,10 +1510,15 @@ bool dc_acquire_release_mpc_3dlut(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count);
+/*
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
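
Callers now pass a parameter struct instead of a bare stream array; a minimal sketch using dc_commit_streams_params as defined in dc_types.h later in this patch:

	struct dc_commit_streams_params params = {
		.streams = streams,
		.stream_count = stream_count,
		.power_source = DC_POWER_SOURCE_AC,
	};
	enum dc_status res = dc_commit_streams(dc, &params);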
@@ -1568,7 +1611,19 @@ struct dc_link {
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. during link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern,
+ * and current_test_pattern will contain the context required for any future
+ * set pattern/set lane adjust call to transition between override state(s).
+ */
enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
union compliance_test_state compliance_test_state;
void *priv;
@@ -2219,11 +2274,9 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
-#endif
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
@@ -2321,10 +2374,17 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
- struct dc_cursor_attributes *cursor_attr);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr);
+
+#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__)
+#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__)
-void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name);
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
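
The wrappers stamp the call site into the internal helpers, e.g.:

	dc_exit_ips_for_hw_access(dc);
	/* expands to dc_exit_ips_for_hw_access_internal(dc, __func__),
	 * presumably so DC_LOG_IPS output can attribute each wake to its caller */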
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 363d522603a2..2293a92df3be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -23,6 +23,7 @@
*
*/
+#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
@@ -34,6 +35,7 @@
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -74,7 +76,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -146,7 +151,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
- dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@ -187,10 +194,17 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
// Wait for DMUB to process command
if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ if (!dmub->debug.timeout_occured) {
+ dmub->debug.timeout_occured = true;
+ dmub->debug.timeout_cmd = *cmd_list;
+ dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ }
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
@@ -781,21 +795,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
} else if (subvp_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
} else {
- pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
}
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
+ phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
}
break;
}
@@ -896,12 +911,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_diagnostic_data diag_data = {0};
+ uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
+ DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
+
if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
@@ -925,7 +943,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
- DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
@@ -1191,13 +1210,35 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
return true;
}
+static int count_active_streams(const struct dc *dc)
+{
+ int i, count = 0;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (stream && !stream->dpms_off)
+ count += 1;
+ }
+
+ return count;
+}
+
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ volatile const struct dmub_shared_state_ips_fw *ips_fw;
+ struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
if (dc->debug.dmcub_emulation)
return;
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
+ return;
+
+ dc_dmub_srv = dc->ctx->dmub_srv;
+ ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
@@ -1208,19 +1249,75 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
if (allow_idle) {
- if (dc->hwss.set_idle_state)
- dc->hwss.set_idle_state(dc, true);
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals new_signals;
+
+ DC_LOG_IPS(
+ "%s wait idle (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ memset(&new_signals, 0, sizeof(new_signals));
+
+ if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+ dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
+ /* TODO: Move this logic out to hwseq */
+ if (count_active_streams(dc) == 0) {
+ /* IPS2 - Display off */
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else {
+ /* RCG only */
+ new_signals.bits.allow_pg = 0;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 0;
+ new_signals.bits.allow_z10 = 0;
+ }
+ }
+
+ ips_driver->signals = new_signals;
}
+ DC_LOG_IPS(
+ "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ allow_idle,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Register access should stop at this point. */
+ if (allow_idle)
+ dc_dmub_srv->needs_idle_wake = true;
}
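
For reference, the allow-signal policy encoded in the branch above, with the DMUB_IPS_ prefix dropped:

	disable_ips mode                        pg  ips1  ips2  z10
	ENABLE / DISABLE_DYNAMIC                1   1     1     1
	DISABLE_IPS1                            0   1     0     0
	DISABLE_IPS2                            1   1     0     0
	DISABLE_IPS2_Z10                        1   1     1     0
	RCG_IN_ACTIVE_IPS2_IN_OFF, no streams   1   1     1     1
	RCG_IN_ACTIVE_IPS2_IN_OFF, active       0   1     0     0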
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- uint32_t allow_state = 0;
- uint32_t commit_state = 0;
+ struct dc_dmub_srv *dc_dmub_srv;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1228,66 +1325,122 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
- if (dc->hwss.get_idle_state &&
- dc->hwss.set_idle_state &&
- dc->clk_mgr->funcs->exit_low_power_state) {
+ dc_dmub_srv = dc->ctx->dmub_srv;
+
+ if (dc->clk_mgr->funcs->exit_low_power_state) {
+ volatile const struct dmub_shared_state_ips_fw *ips_fw =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
+
+ rcg_exit_count = ips_fw->rcg_exit_count;
+ ips1_exit_count = ips_fw->ips1_exit_count;
+ ips2_exit_count = ips_fw->ips2_exit_count;
+
+ ips_driver->signals.all = 0;
+
+ DC_LOG_IPS(
+ "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ ips_driver->signals.bits.allow_ips1,
+ ips_driver->signals.bits.allow_ips2,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->rcg_entry_count,
+ ips_fw->ips1_entry_count,
+ ips_fw->ips2_entry_count);
+
+ /* Note: register access has technically not resumed for DCN here, but we
+ * need to message PMFW through our standard register interface.
+ */
+ dc_dmub_srv->needs_idle_wake = false;
- allow_state = dc->hwss.get_idle_state(dc);
- dc->hwss.set_idle_state(dc, false);
+ if (prev_driver_signals.bits.allow_ips2 &&
+ (!dc->debug.optimize_ips_handshake ||
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ DC_LOG_IPS(
+ "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
- // Wait for evaluation time
- for (;;) {
+ if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_ALLOW_MASK)
- break;
- /* allow was still set, retry eval delay */
- dc->hwss.set_idle_state(dc, false);
- }
+ if (ips_fw->signals.bits.ips2_commit) {
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
+
+ DC_LOG_IPS(
+ "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_COMMIT_MASK)
- break;
+ DC_LOG_IPS(
+ "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+ while (ips_fw->signals.bits.ips2_commit)
udelay(1);
- }
+
+ DC_LOG_IPS(
+ "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- /* TODO: See if we can return early here - IPS2 should go
- * back directly to IPS0 and clear the flags, but it will
- * be safer to directly notify DMCUB of this.
- */
- allow_state = dc->hwss.get_idle_state(dc);
+ DC_LOG_IPS(
+ "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
- if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS1_COMMIT_MASK)
- break;
+ if (prev_driver_signals.bits.allow_ips1) {
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+ while (ips_fw->signals.bits.ips1_commit)
udelay(1);
- }
+
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+
+ DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ rcg_exit_count,
+ ips1_exit_count,
+ ips2_exit_count);
}
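
Condensing the exit path above, the driver-side sequence is roughly:

	1. clear all driver allow signals (ips_driver->signals.all = 0);
	2. if IPS2 was previously allowed: wait the IPS2 evaluation delay,
	   and if the firmware committed IPS2, request a PMFW low-power
	   exit, wait the IPS2 entry upper bound, request the exit again,
	   spin until ips2_commit clears, verify hardware power-up, and
	   resync the DMCUB inbox1 pointers;
	3. notify DMCUB that idle is no longer allowed;
	4. if IPS1 was previously allowed: spin until ips1_commit clears;
	5. verify hardware power-up one final time.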
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
@@ -1315,21 +1468,42 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
if (dc_dmub_srv->idle_allowed == allow_idle)
return;
+ DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
+
/*
* Entering a low power state requires a driver notification.
* Powering up the hardware requires notifying PMFW and DMCUB.
* Clearing the driver idle allow requires a DMCUB command.
* DMCUB commands requires the DMCUB to be powered up and restored.
- *
- * Exit out early to prevent an infinite loop of DMCUB commands
- * triggering exit low power - use software state to track this.
*/
- dc_dmub_srv->idle_allowed = allow_idle;
- if (!allow_idle)
+ if (!allow_idle) {
+ dc_dmub_srv->idle_exit_counter += 1;
+
dc_dmub_srv_exit_low_power_state(dc);
- else
+ /*
+ * Idle is considered fully exited only after the sequence above
+ * fully completes. If we have a race of two threads exiting
+ * at the same time then it's safe to perform the sequence
+ * twice as long as we're not re-entering.
+ *
+ * Infinite command submission is avoided by using the
+ * dm_execute_dmub_cmd submission instead of the "wake" helpers.
+ */
+ dc_dmub_srv->idle_allowed = false;
+
+ dc_dmub_srv->idle_exit_counter -= 1;
+ if (dc_dmub_srv->idle_exit_counter < 0) {
+ ASSERT(0);
+ dc_dmub_srv->idle_exit_counter = 0;
+ }
+ } else {
+ /* Consider idle as notified prior to the actual submission to
+ * prevent multiple entries. */
+ dc_dmub_srv->idle_allowed = true;
+
dc_dmub_srv_notify_idle(dc, allow_idle);
+ }
}
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
@@ -1364,7 +1538,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1413,8 +1588,10 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
}
+
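
Putting the pieces together, a hedged sketch of a caller going through the wake helpers (the command payload is elided):

	union dmub_rb_cmd cmd = {0};

	/* ...populate cmd... */
	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
		return false;
	/* idle was re-allowed internally only if no exit was in flight
	 * (idle_exit_counter == 0) and debug.disable_dmub_reallow_idle is unset */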
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 952bfb368886..2c5866211f60 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -35,6 +35,7 @@ struct pipe_ctx;
struct dc_crtc_timing_adjust;
struct dc_crtc_timing;
struct dc_state;
+struct dc_surface_update;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -51,7 +52,9 @@ struct dc_dmub_srv {
struct dc_context *ctx;
void *dm;
+ int32_t idle_exit_counter;
bool idle_allowed;
+ bool needs_idle_wake;
};
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 1cb7765f593a..519c3df78ee5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -137,8 +137,13 @@ enum dp_link_encoding {
enum dp_test_link_rate {
DP_TEST_LINK_RATE_RBR = 0x06,
+ DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
DP_TEST_LINK_RATE_HBR = 0x0A,
+ DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
DP_TEST_LINK_RATE_HBR2 = 0x14,
+ DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane
DP_TEST_LINK_RATE_HBR3 = 0x1E,
DP_TEST_LINK_RATE_UHBR10 = 0x01,
DP_TEST_LINK_RATE_UHBR20 = 0x02,
@@ -917,16 +922,6 @@ struct dpcd_usb4_dp_tunneling_info {
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
-#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
-#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
-#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256
-#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250
-#endif
-
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1232,8 +1227,7 @@ union replay_enable_and_configuration {
unsigned char FREESYNC_PANEL_REPLAY_MODE :1;
unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1;
unsigned char STATE_TRANSITION_ERROR_DETECTION :1;
- unsigned char RESERVED0 :1;
- unsigned char RESERVED1 :4;
+ unsigned char RESERVED :5;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 811474f4419b..2ad7f60805f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -738,6 +738,13 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
+enum chroma_cositing {
+ CHROMA_COSITING_NONE,
+ CHROMA_COSITING_LEFT,
+ CHROMA_COSITING_TOPLEFT,
+ CHROMA_COSITING_COUNT
+};
+
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
@@ -827,9 +834,7 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
-#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
@@ -942,6 +947,7 @@ struct dc_crtc_timing {
uint32_t hdmi_vic;
uint32_t rid;
uint32_t fr_index;
+ uint32_t frl_uncompressed_video_bandwidth_in_kbps;
enum dc_timing_3d_format timing_3d_format;
enum dc_color_depth display_color_depth;
enum dc_pixel_encoding pixel_encoding;
@@ -975,6 +981,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_max;
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
+ uint32_t allow_otg_v_count_halt;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index ef380cae816a..44afcd989224 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "dc_hw_types.h"
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
index 9ee184c1df00..ab13335f1d01 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -30,5 +30,6 @@
void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
void dc_plane_destruct(struct dc_plane_state *plane_state);
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state);
#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
index d167fdbfa8a9..caa45db50232 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "inc/core_status.h"
-struct dc_state *dc_state_create(struct dc *dc);
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params);
void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
struct dc_state *dc_state_create_copy(struct dc_state *src_state);
void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
@@ -39,12 +39,12 @@ void dc_state_destruct(struct dc_state *state);
void dc_state_retain(struct dc_state *state);
void dc_state_release(struct dc_state *state);
-enum dc_status dc_state_add_stream(struct dc *dc,
+enum dc_status dc_state_add_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
@@ -74,5 +74,5 @@ bool dc_state_add_all_planes_for_stream(
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream);
+ const struct dc_stream_state *stream);
#endif /* _DC_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index c1f44e09a6c1..615086d74d32 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -29,6 +29,8 @@
#include "dc_state.h"
#include "dc_stream.h"
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id);
+
/* Get the type of the provided resource (none, phantom, main) based on the provided
* context. If the context is unavailable, determine only if phantom or not.
*/
@@ -45,7 +47,7 @@ struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *
struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
@@ -58,11 +60,11 @@ void dc_state_release_phantom_plane(const struct dc *dc,
struct dc_plane_state *phantom_plane);
/* add/remove phantom stream to context and generate subvp meta data */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream);
@@ -92,11 +94,11 @@ bool dc_state_add_all_phantom_planes_for_stream(
struct dc_state *state);
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index ee10941caa59..e5dbbc6089a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -190,7 +190,7 @@ struct dc_stream_state {
PHYSICAL_ADDRESS_LOC dmdata_address;
bool use_dynamic_meta;
- struct dc_transfer_func *out_transfer_func;
+ struct dc_transfer_func out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -428,14 +428,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
/*
- * Set up streams and links associated to drive sinks
- * The streams parameter is an absolute set of all active streams.
- *
- * After this call:
- * Phy, Encoder, Timing Generator are programmed and enabled.
- * New streams are enabled with blank stream; no memory read.
- */
-/*
* Enable stereo when commit_streams is not required,
* for example, frame alternate.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5..0f66d00ef80f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -422,7 +422,7 @@ struct dc_dwb_params {
enum dwb_capture_rate capture_rate; /* controls the frame capture rate */
struct scaling_taps scaler_taps; /* Scaling taps */
enum dwb_subsample_position subsample_position;
- struct dc_transfer_func *out_transfer_func;
+ const struct dc_transfer_func *out_transfer_func;
};
/* audio*/
@@ -1050,6 +1050,8 @@ union replay_error_status {
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
+ /* Replay caps support DPCD & EDID caps */
+ bool replay_cap_support;
/* Power opt flags that are supported */
unsigned int replay_power_opt_supported;
/* SMU optimization is supported */
@@ -1085,9 +1087,9 @@ struct replay_settings {
/* SMU optimization is enabled */
bool replay_smu_opt_enable;
/* Current Coasting vtotal */
- uint16_t coasting_vtotal;
+ uint32_t coasting_vtotal;
/* Coasting vtotal table */
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
@@ -1175,4 +1177,20 @@ enum mall_stream_type {
SUBVP_MAIN, // subvp in use, this stream is main stream
SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
};
+
+enum dc_power_source_type {
+ DC_POWER_SOURCE_AC, // wall power
+ DC_POWER_SOURCE_DC, // battery power
+};
+
+struct dc_state_create_params {
+ enum dc_power_source_type power_source;
+};
+
+struct dc_commit_streams_params {
+ struct dc_stream_state **streams;
+ uint8_t stream_count;
+ enum dc_power_source_type power_source;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index f0458b8f00af..12f3c35b3a34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi(
}
}
}
+static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return dc_fixpt_from_int(162); /* 162 MHz */
+ case LINK_RATE_HIGH:
+ return dc_fixpt_from_int(270); /* 270 MHz */
+ case LINK_RATE_HIGH2:
+ return dc_fixpt_from_int(540); /* 540 MHz */
+ case LINK_RATE_HIGH3:
+ return dc_fixpt_from_int(810); /* 810 MHz */
+ case LINK_RATE_UHBR10:
+ return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */
+ case LINK_RATE_UHBR13_5:
+ return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */
+ case LINK_RATE_UHBR20:
+ return dc_fixpt_from_int(625); /* 625 MHz */
+ default:
+ /* Unexpected case, this requires debug if encountered. */
+ ASSERT(0);
+ return dc_fixpt_from_int(0);
+ }
+}
+
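These constants follow from the DP channel coding: for 8b/10b the link symbol clock is the per-lane bit rate divided by 10 (e.g. HBR: 2.7 Gbps / 10 = 270 MHz), while for 128b/132b it is the bit rate divided by 32 (UHBR10: 10 Gbps / 32 = 312.5 MHz, UHBR13.5: 421.875 MHz, UHBR20: 625 MHz).
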
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
-/*For DP SST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpsst(
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+static uint32_t calculate_required_audio_bw_in_symbols(
const struct audio_crtc_info *crtc_info,
+ const struct dp_audio_layout_config *layout_config,
uint32_t channel_count,
- union audio_sample_rates *sample_rates)
+ uint32_t sample_rate_hz,
+ uint32_t av_stream_map_lane_count,
+ uint32_t audio_sdp_overhead)
+{
+	/* DP spec recommends a 1.05 to 1.1 safety margin to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom);
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ return required_symbols_per_hblank;
+}
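+
+/* Illustrative numbers, not from the patch: 8ch 48 kHz over 128b/132b on a
+ * 148.5 MHz, h_total 2200 timing gives a 67.5 kHz line rate, i.e. ~0.711
+ * samples (and layouts) per line. The floor term is 0, so only the remainder
+ * path runs: ceil((0.711 * 10 + 10) * 1.1) = 19 symbols, rounded up to the
+ * 4-lane stream map as 20 required symbols per hblank.
+ */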
+
+/* Current calculation is only applicable to 8b/10b MST and 128b/132b SST/MST. */
+static uint32_t calculate_available_hblank_bw_in_symbols(
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info)
{
- /* do nothing */
+ uint64_t hblank = crtc_info->h_total - crtc_info->h_active;
+ struct fixed31_32 hblank_time_msec =
+ dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz);
+ struct fixed31_32 lsclkfreq_mhz =
+ get_link_symbol_clk_freq_mhz(dp_link_info->link_rate);
+ struct fixed31_32 average_stream_sym_bw_frac;
+ struct fixed31_32 peak_stream_bw_kbps;
+ struct fixed31_32 bits_per_pixel;
+ struct fixed31_32 link_bw_kbps;
+ struct fixed31_32 available_stream_sym_count;
+ uint32_t available_hblank_bw = 0; /* in stream symbols */
+
+ if (crtc_info->dsc_bits_per_pixel) {
+ bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16);
+ } else {
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_pixel = dc_fixpt_from_int(6);
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_pixel = dc_fixpt_from_int(10);
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_pixel = dc_fixpt_from_int(12);
+ break;
+ default:
+ /* Default to commonly supported color depth. */
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ }
+
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3);
+
+ if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3);
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2);
+ } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2);
+ }
+ }
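+
+	/* Resulting effective bpp, illustrative: 8-bit RGB/4:4:4 -> 24,
+	 * 4:2:2 -> 16, 4:2:0 -> 12. The DSC path divides by 16 because
+	 * dsc_bits_per_pixel is assumed to be in 1/16th-of-a-bit units.
+	 */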
+
+ /* Use simple stream BW calculation because mainlink overhead is
+ * accounted for separately in the audio BW calculations.
+ */
+ peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10);
+ peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel);
+ link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps);
+ average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps);
+
+ available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac);
+ available_hblank_bw = dc_fixpt_floor(available_stream_sym_count);
+ available_hblank_bw *= dp_link_info->lane_count;
+ available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */
+
+ if (available_hblank_bw < dp_link_info->hblank_min_symbol_width)
+ available_hblank_bw = dp_link_info->hblank_min_symbol_width;
+
+ if (available_hblank_bw < 12)
+ available_hblank_bw = 0;
+ else
+ available_hblank_bw -= 12; /* Main link overhead */
+
+ return available_hblank_bw;
}
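+
+/* Summary of the above, as read: available symbols = hblank time x link
+ * symbol clock x (stream BW / link BW) x lane count, minus 4 EOC symbols per
+ * DSC slice, raised to at least hblank_min_symbol_width, then reduced by the
+ * fixed 12-symbol main-link overhead.
+ */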
-/*For DP MST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpmst(
+static void check_audio_bandwidth_dp(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
union audio_sample_rates *sample_rates)
{
- /* do nothing */
+ struct dp_audio_layout_config layout_config = {0};
+ uint32_t available_hblank_bw;
+ uint32_t av_stream_map_lane_count;
+ uint32_t audio_sdp_overhead;
+
+	/* TODO: Add validation for the SST 8b/10b case */
+ if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING)
+ return;
+
+ available_hblank_bw = calculate_available_hblank_bw_in_symbols(
+ crtc_info, dp_link_info);
+ av_stream_map_lane_count = get_av_stream_map_lane_count(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ audio_sdp_overhead = get_audio_sdp_overhead(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ get_audio_layout_config(
+ channel_count, dp_link_info->encoding, &layout_config);
+
+ if (layout_config.max_layouts_per_audio_sdp == 0 ||
+ layout_config.symbols_per_layout == 0 ||
+ layout_config.layouts_per_sample_denom == 0) {
+ return;
+ }
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 192000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_192 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 176400,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_176_4 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 96000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_96 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 88200,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_88_2 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 48000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_48 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 44100,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_44_1 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 32000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_32 = 0;
}
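+
+/* Each rate above is checked independently against the same per-line symbol
+ * budget, so a narrow-hblank timing may lose only the higher rates
+ * (192/176.4 kHz) while 48 kHz and below still fit.
+ */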
static void check_audio_bandwidth(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
enum signal_type signal,
union audio_sample_rates *sample_rates)
@@ -271,12 +538,9 @@ static void check_audio_bandwidth(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- check_audio_bandwidth_dpsst(
- crtc_info, channel_count, sample_rates);
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- check_audio_bandwidth_dpmst(
- crtc_info, channel_count, sample_rates);
+ check_audio_bandwidth_dp(
+ crtc_info, dp_link_info, channel_count, sample_rates);
break;
default:
break;
@@ -394,7 +658,8 @@ void dce_aud_az_configure(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info)
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info)
{
struct dce_audio *aud = DCE_AUD(audio);
@@ -529,6 +794,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
channel_count,
signal,
&sample_rates);
@@ -588,6 +854,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
8,
signal,
&sample_rate);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index dbd2cfed0603..539f881928d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio);
void dce_aud_az_configure(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void dce_aud_wall_dto_setup(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd..b5e0289d2fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+	// Apply SSED (spread spectrum) DPREF clock for eDP 8b/10b only.
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+ && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+ && encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
- dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index a2f48d46d199..ee601a6897a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -22,9 +22,6 @@
* Authors: AMD
*
*/
-
-#include <linux/delay.h>
-
#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
@@ -315,9 +312,6 @@ static bool setup_engine(
 	/* We have checked that I2C is not used by DMCU; set SW_USE_I2C_REG_REQ to 1 to indicate SW is using it */
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
-
 	/* Set SW-requested I2C speed to default; API calls may override it later */
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index f98400efdd9b..e34e445a4013 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -181,6 +181,7 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index bf1ffc3629c7..3d9be87aae45 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,6 +111,7 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 670d5ab9d998..2b1673d69ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default(
static void program_pwl(struct dce_transform *xfm_dce,
const struct pwl_params *params)
{
- int retval;
+ uint32_t retval;
uint8_t max_tries = 10;
uint8_t counter = 0;
uint32_t i = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index f9d6a181164a..b851fc65f5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -34,11 +34,7 @@
#include "reg_helper.h"
#include "fixed31_32.h"
-#ifdef _WIN32
-#include "atombios.h"
-#else
#include "atom.h"
-#endif
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index ba1fec3016d5..bf636b28e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link)
{
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 38e4797e9476..4f559a025cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint16_t param = (uint16_t)(panel_inst << 8);
if (is_alpm)
- param |= REPLAY_RESIDENCY_MODE_ALPM;
+ param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM;
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
@@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
-/**
+/*
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
@@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-/**
+/*
* send Replay general cmd to DMUB.
*/
static void dmub_replay_send_cmd(struct dmub_replay *dmub,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index f0777d61c2cb..c307f040e48f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_opp_regamma_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 7e92effec894..683866797709 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
DCE112 = dce112_compressor.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1e3ef68a452a..8f508e662748 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,7 +24,7 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
DCE120 = dce120_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331accc0e..eede83ad91fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 7eefffbdc925..fba189d26652 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
DCE80 = dce80_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index ae6a131be71b..8dc7938c36d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,9 +24,9 @@
DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_dpp.o dcn10_opp.o \
+ dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
- dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3538973bd0c6..0b49362f71b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -24,7 +24,7 @@
*/
#include "dc.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "dcn10_cm_common.h"
#include "custom_float.h"
@@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(
}
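+/* Sketch of the intent, as read: read back two packed coefficients from each
+ * CSC register between csc_c11_c12 and csc_c33_c34, which are assumed to be
+ * consecutively addressed, into a flat 12-entry 3x4 matrix array.
+ */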
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg, regval0, regval1;
+ unsigned int i = 0;
+
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) {
+ REG_GET_2(cur_csc_reg,
+ csc_c11, &regval0,
+ csc_c12, &regval1);
+
+ regval[2 * i] = regval0;
+ regval[(2 * i) + 1] = regval1;
+
+ i++;
+ }
+}
+
void cm_helper_program_xfer_func(
struct dc_context *ctx,
const struct pwl_params *params,
@@ -382,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS) {
+ DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+ i, TRANSFER_FUNC_POINTS);
+ return false;
+ }
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 0a68b63d6126..decc50b1ac53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params);
-
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index d51f1ce02874..6dd355a03033 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high(
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
- unsigned int debug_data;
+ unsigned int debug_data = 0;
unsigned int i;
if (forced_pstate_allow) {
@@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks(
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks(
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks(
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 4201b7627030..d1f9e63944c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -409,7 +409,7 @@ struct dcn10_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
@@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub,
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 09784222cc03..69119b2fdce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -692,6 +692,7 @@ struct dcn_hubp_state {
uint32_t primary_meta_addr_hi;
uint32_t uclk_pstate_force;
uint32_t hubp_cntl;
+ uint32_t flip_control;
};
struct dcn10_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 9033b39e0e0c..c51b717e5622 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in
remaining_buffer -= chars_printed;
pBuf += chars_printed;
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 377f1ba1a81b..4d0eed7598b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d980e6bd6c66..b7a89c39f445 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -167,7 +167,6 @@ struct dcn10_link_enc_registers {
uint32_t DIO_LINKD_CNTL;
uint32_t DIO_LINKE_CNTL;
uint32_t DIO_LINKF_CNTL;
- uint32_t DIG_FIFO_CTRL0;
uint32_t DIO_CLK_CNTL;
uint32_t DIG_BE_CLK_CNTL;
};
@@ -475,9 +474,6 @@ struct dcn10_link_enc_registers {
type HPO_DP_ENC_SEL;\
type HPO_HDMI_ENC_SEL
-#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \
- type DIG_FIFO_OUTPUT_PIXEL_MODE
-
#define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_BE_ENABLE;\
type DIG_RB_SWITCH_EN;\
@@ -512,7 +508,6 @@ struct dcn10_link_enc_shift {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
};
@@ -521,7 +516,6 @@ struct dcn10_link_enc_mask {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0dec57679269..71e9288d60ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_types.h"
#include "dm_services.h"
#include "dcn10_opp.h"
#include "reg_helper.h"
@@ -160,10 +161,17 @@ static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
+ bool force_chroma_subsampling_1tap =
+ oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap;
+
switch (params->pixel_encoding) {
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
@@ -173,11 +181,17 @@ static void opp1_set_pixel_encoding(
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
break;
default:
break;
}
+
+ if (force_chroma_subsampling_1tap)
+ REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0);
}
/**
@@ -377,6 +391,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 2c0ecfa5a643..c87de68a509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -79,6 +79,8 @@
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index c429590f1298..1b96972b9d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_1;
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
- uint32_t DIG_FE_CNTL2;
uint32_t DIG_FIFO_STATUS;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
@@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP11_ENABLE;\
type DP_SEC_GSP11_LINE_NUM
-#define SE_REG_FIELD_LIST_DCN3_2(type) \
+#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \
type DIG_FIFO_OUTPUT_PIXEL_MODE;\
type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\
type DIG_SYMCLK_FE_ON;\
@@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift {
uint8_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
SE_REG_FIELD_LIST_DCN3_0(uint8_t);
- SE_REG_FIELD_LIST_DCN3_2(uint8_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t);
};
@@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask {
uint32_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
SE_REG_FIELD_LIST_DCN3_0(uint32_t);
- SE_REG_FIELD_LIST_DCN3_2(uint32_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t);
};
@@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 3dae3943b056..9b6070c99794 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
-DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f8667be57046..80779e85e2c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
}
+
+ if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) {
+ /* Swap double buffered coefficient set */
+ uint32_t wbscl_mode = REG_READ(WBSCL_MODE);
+ bool coef_ram_current = get_reg_field_value_ex(
+ wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT,
+ dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT);
+
+ REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current);
+ }
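+
+	/* Assumed rationale: the WBSCL coefficient RAM is double buffered, so
+	 * the writes above land in the inactive set and toggling
+	 * WBSCL_COEF_RAM_SEL makes them take effect without mid-frame tearing.
+	 */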
+
}
static const struct dwbc_funcs dcn20_dwbc_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 6eebcb22e317..c6f859871d11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 2f6146bf1d32..24a9c45988ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -85,7 +85,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 89c3bf0fe0c9..6bba020ad6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1331,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp)
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+ if (REG(DCHUBP_CNTL))
+ s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
static void hubp2_validate_dml_output(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index efa2adf4f83d..8da3084d933f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -147,7 +147,7 @@
uint32_t DCN_CUR1_TTU_CNTL1;\
uint32_t VMID_SETTINGS_0
-
+/* Shared with DCN3.x */
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index b2b266953d18..c34e04cac9a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -147,7 +147,8 @@
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
@@ -231,6 +232,8 @@
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5da6e44f284a..ea73473b970a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl(
MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
-
}
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
@@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
-
- REG_SEQ_SUBMIT();
- PERF_TRACE();
- REG_SEQ_WAIT_DONE();
- PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
@@ -542,8 +540,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
+static void mpc2_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Gamma block state */
+ REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode);
+}
+
static const struct mpc_funcs dcn20_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc2_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d0198661..fbf1b6370eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
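+
+/* A pending DPG update is one still waiting to latch into the double-buffered
+ * registers; callers can poll this, alongside dpg_is_blanked(), before
+ * treating pattern-generator programming as applied (assumed usage).
+ */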
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27d..8f186abd558d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index 2b0b4f32e13b..3880db59e457 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
DCN201 = dcn201_hubbub.o\
- dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \
dcn201_dccg.o dcn201_link_encoder.o
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
index 037d265431c6..63798132ed95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
@@ -52,7 +52,7 @@
static bool hubbub201_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
- hubbub->detile_buf_size = 164 * 1024;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
index 35dd4bac242a..cd2bfcc51276 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ /* no need to program PTE */
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
@@ -99,6 +100,10 @@ static void hubp201_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
+	/*
+	 * OTG is locked when this function is called and registers are double
+	 * buffered, so disabling the requestors is not needed.
+	 */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp201_program_requestor(hubp, rq_regs);
hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
index 8b95ef251332..be25e8dc0636 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
@@ -30,6 +30,10 @@
#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
@@ -44,7 +48,15 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
#define DPCS_DCN201_REG_LIST(id) \
- DPCS_DCN2_CMN_REG_LIST(id)
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id)
void dcn201_link_encoder_construct(
struct dcn20_link_encoder *enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a409..6a71ba3dfc63 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index aeb0e0d9b70a..2546224b326a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks(
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks(
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks(
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index d8eb2bb7282c..ab2ce0313529 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index b5b2aa3b3783..c6ca70f3c061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -25,13 +25,11 @@
DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
- dcn30_dpp.o \
dcn30_dccg.o \
dcn30_mpc.o dcn30_vpg.o \
dcn30_afmt.o \
dcn30_dio_stream_encoder.o \
dcn30_dwb.o \
- dcn30_dpp_cm.o \
dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index ddb344056d40..b8327237ed44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"
#include "custom_float.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
index 35a613bb08bf..3f1da7f3a91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
@@ -29,15 +29,9 @@
#include "dcn20/dcn20_dccg.h"
-#define DCCG_REG_LIST_DCN3AG() \
- DCCG_COMMON_REG_LIST_DCN_BASE(),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL)
-
-
#define DCCG_REG_LIST_DCN30() \
DCCG_REG_LIST_DCN2(),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
@@ -46,19 +40,10 @@
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL)
-#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \
- DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
-
#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
DCCG_MASK_SH_LIST_DCN2(mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 1fb8fd7afc95..b8e31b5ea114 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -30,8 +30,6 @@
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
-/* #include "dcn3ag/dcn3ag_phy_fw.h" */
-
#include "gpio_service_interface.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index f2d90f2b8bf1..5b6177c2ae98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -55,7 +55,8 @@
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh)
#define DPCS_DCN3_MASK_SH_LIST(mask_sh)\
DPCS_DCN2_MASK_SH_LIST(mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 005dbe099a7a..425b830b88d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -29,9 +29,6 @@
#include "reg_helper.h"
#include "hw_shared.h"
#include "dc.h"
-#include "core_types.h"
-#include <linux/delay.h>
-
#define DC_LOGGER \
enc1->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 1b9d9495f76d..fae98cf52020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
- .dwb_program_output_csc = NULL,
.dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
- .dwb_set_scaler = NULL,
};
void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index 332634b76aac..0f3f7c5fbaec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -217,6 +217,7 @@
SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\
SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
@@ -524,6 +525,7 @@
type DWB_OGAM_LUT_DATA;\
type DWB_OGAM_LUT_WRITE_COLOR_MASK;\
type DWB_OGAM_LUT_READ_COLOR_SEL;\
+ type DWB_OGAM_LUT_READ_DBG;\
type DWB_OGAM_LUT_HOST_SEL;\
type DWB_OGAM_LUT_CONFIG_MODE;\
type DWB_OGAM_LUT_STATUS;\
@@ -710,7 +712,7 @@
type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\
type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\
type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\
- type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS;
+ type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS
struct dcn30_dwbc_registers {
/* DCN3AG */
@@ -733,6 +735,10 @@ struct dcn30_dwbc_registers {
uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT;
uint32_t DWB_HOST_READ_CONTROL;
uint32_t DWB_SOFT_RESET;
+ uint32_t DWB_DEBUG_CTRL;
+ uint32_t DWB_DEBUG;
+ uint32_t DWB_TEST_DEBUG_INDEX;
+ uint32_t DWB_TEST_DEBUG_DATA;
/* DWBSCL */
uint32_t DWBSCL_COEF_RAM_TAP_SELECT;
@@ -747,6 +753,9 @@ struct dcn30_dwbc_registers {
uint32_t DWBSCL_DEST_SIZE;
uint32_t DWBSCL_OVERFLOW_STATUS;
uint32_t DWBSCL_OVERFLOW_COUNTER;
+ uint32_t DWBSCL_DEBUG;
+ uint32_t DWBSCL_TEST_DEBUG_INDEX;
+ uint32_t DWBSCL_TEST_DEBUG_DATA;
/* DWBCP */
uint32_t DWB_HDR_MULT_COEF;
@@ -838,6 +847,9 @@ struct dcn30_dwbc_registers {
uint32_t DWB_OGAM_RAMB_REGION_28_29;
uint32_t DWB_OGAM_RAMB_REGION_30_31;
uint32_t DWB_OGAM_RAMB_REGION_32_33;
+ uint32_t DWBCP_DEBUG;
+ uint32_t DWBCP_TEST_DEBUG_INDEX;
+ uint32_t DWBCP_TEST_DEBUG_DATA;
};
/* Internal enums / structs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
index 152c9c5733f1..6a5af3da4b45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
@@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
index 7b597908b937..ca6233e8f1f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
@@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 75547ce86c09..60a64d290352 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp)
if (REG(DCHUBP_CNTL))
s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
void hubp3_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d1500b223858..fca94e50ae93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+void mpc3_mpc_init(struct mpc *mpc)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int opp_id;
+
+ mpc1_mpc_init(mpc);
+
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+ }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+	/* Assuming the MPC out mux is connected to the OPP with the same index
+	 * at this point in time (e.g. transitioning from VBIOS to driver).
+	 */
+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control)
-{
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_RATE_CONTROL_DISABLE, !enable,
- MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
- if (flow_control)
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1129,6 +1140,64 @@ void mpc3_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn30_mpc *mpc30,
+ int mpcc_id,
+ uint16_t *regval,
+ uint32_t *select)
+{
+ struct color_matrices_reg gam_regs;
+
+ //current coefficient set in use
+ REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select);
+
+ gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+ gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+
+}
+
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint16_t arr_reg_val[12] = {0};
+ uint32_t select = 0;
+
+ read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
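Note: mpc3_get_gamut_remap() reverses the programming path: it reads which coefficient bank (A or B) is live, pulls the twelve 16-bit fixed-point fields back out of the registers, and convert_hw_matrix() turns them into fixed31_32 values for the temperature matrix. A self-contained sketch of decoding one such coefficient, assuming for illustration a two's-complement layout with 13 fractional bits (the real field format is hidden behind the driver's helpers):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the raw field, then scale by 2^-13. */
static double coeff_to_double(uint16_t regval)
{
	int16_t sval = (int16_t)regval;

	return (double)sval / (double)(1 << 13);
}

int main(void)
{
	printf("%f\n", coeff_to_double(0x2000));  /* 1.000000 */
	printf("%f\n", coeff_to_double(0xE000));  /* -1.000000 */
	return 0;
}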
bool mpc3_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
@@ -1382,12 +1451,58 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
+static void mpc3_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint32_t rmu_status = 0xf;
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Color blocks state */
+ REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status);
+
+ if (rmu_status == mpcc_inst) {
+ REG_GET(SHAPER_CONTROL[0],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ } else {
+ REG_GET(SHAPER_CONTROL[1],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ }
+
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
+ MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc1_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init = mpc3_mpc_init,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1404,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 5198f2167c7c..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc3_mpc_init(
+ struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
@@ -1056,6 +1063,10 @@ void mpc3_set_gamut_remap(
int mpcc_id,
const struct mpc_grph_gamut_adjustment *adjust);
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust);
+
void mpc3_set_rmu_mux(
struct mpc *mpc,
int rmu_idx,
@@ -1074,13 +1085,6 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control);
-
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
index ed9a5549c389..466ba20b9c61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN30_VPG_H__
#define __DAL_DCN30_VPG_H__
+#include "vpg.h"
#define DCN30_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn30_vpg, base)
@@ -132,28 +133,6 @@ struct dcn30_vpg_mask {
VPG_DCN3_REG_FIELD_LIST(uint32_t);
};
-struct vpg;
-
-struct vpg_funcs {
- void (*update_generic_info_packet)(
- struct vpg *vpg,
- uint32_t packet_index,
- const struct dc_info_packet *info_packet,
- bool immediate_update);
-
- void (*vpg_poweron)(
- struct vpg *vpg);
-
- void (*vpg_powerdown)(
- struct vpg *vpg);
-};
-
-struct vpg {
- const struct vpg_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dcn30_vpg {
struct vpg base;
const struct dcn30_vpg_registers *regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
index 73db962dbc03..067e49cb238e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
@@ -56,10 +56,4 @@ struct dccg *dccg301_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-struct dccg *dccg301_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *dccg_shift,
- const struct dccg_mask *dccg_mask);
-
#endif //__DCN301_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
index a046664e2031..c1959672df50 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
@@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
+ .init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index e3caaacf7493..e3be0bab4007 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -34,12 +34,14 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
SR(PHYASYMCLK_CLOCK_CNTL),\
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL),\
SR(PHYDSYMCLK_CLOCK_CNTL),\
SR(PHYESYMCLK_CLOCK_CNTL),\
SR(DPSTREAMCLK_CNTL),\
+ SR(HDMISTREAMCLK_CNTL),\
SR(SYMCLK32_SE_CNTL),\
SR(SYMCLK32_LE_CNTL),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
@@ -78,6 +80,8 @@
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
@@ -92,6 +96,8 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 26be5fee7411..b2cea59ba5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux(
}
}
-static void enc31_hw_init(struct link_encoder *enc)
+void enc31_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
index 221671563a0b..ee78ba80797c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -89,6 +89,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -222,6 +223,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -283,4 +285,6 @@ bool dcn31_link_encoder_is_in_alt_mode(
void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings);
+void enc31_hw_init(struct link_encoder *enc);
+
#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 5b7ad38f85e0..03b4ac2f1991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -377,7 +377,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
*/
REG_WAIT(DP_DPHY_SYM32_STATUS,
SAT_UPDATE_PENDING, 0,
- 10, DP_SAT_UPDATE_MAX_RETRY);
+ 100, DP_SAT_UPDATE_MAX_RETRY);
}
void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
x),
25));
+ // If y rounds up to an integer, carry it over into x.
+ if (y >> 25) {
+ x += 1;
+ y = 0;
+ }
+
switch (stream_encoder_inst) {
case 0:
REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
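Note: the throttled VCP size is programmed as an integer slot count X plus a 25-bit fraction Y. Because Y is produced by rounding, it can round up to exactly 1.0 (bit 25 set), and the carry added above folds that overflow into X instead of writing an out-of-range fraction. A self-contained model of the split (the driver derives X and Y with dc_fixpt helpers; this uses plain doubles for illustration):

#include <stdint.h>
#include <stdio.h>

/* Split a slot count into integer X and a 25-bit fraction Y, folding a
 * rounded-up fraction (bit 25 set) back into X as the hunk above does.
 */
static void split_vcp(double slots, uint32_t *x, uint32_t *y)
{
	*x = (uint32_t)slots;
	*y = (uint32_t)((slots - *x) * (1u << 25) + 0.5); /* round fraction */
	if (*y >> 25) {		/* fraction rounded up to exactly 1.0 */
		*x += 1;
		*y = 0;
	}
}

int main(void)
{
	uint32_t x, y;

	split_vcp(3.99999999, &x, &y);
	printf("x=%u y=%u\n", x, y);	/* x=4 y=0 after the carry */
	return 0;
}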
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 45143459eedd..678db949cfe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
&info_frame->hdrsmd,
true);
+ /* packet index 4 is reserved for sending immediate SDP messages;
+ * use other packet indices (such as 5 or 6) for other info packets
+ */
+
if (info_frame->adaptive_sync.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 5b5b5e0775fa..b906db6e7355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -172,7 +172,7 @@ static uint32_t convert_and_clamp(
static bool hubbub31_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks(
static bool hubbub31_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -635,7 +635,7 @@ static bool hubbub31_program_stutter_watermarks(
static bool hubbub31_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks(
static bool hubbub31_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 281be20b1a10..20c6fe48567f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -173,5 +173,12 @@ void dcn31_panel_cntl_construct(
break;
}
- dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ /* If supported, the power sequencer mapping follows the DIG instance */
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ else
+ /* If not supported, power sequencers are assigned in order, so the
+ * first pwrseq goes to the first panel instance (legacy behavior)
+ */
+ dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
index f1deb1c3c363..cfb923d85630 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg)
{
struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
- if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ uint32_t vpg_gsp_mem_pwr_state;
+
+ REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false &&
+ vpg_gsp_mem_pwr_state == 0)
return;
REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
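Note: vpg31_poweron() previously returned early whenever VPG memory low power was disabled; the added VPG_GSP_MEM_PWR_STATE read makes that early-out conditional on the memory actually being awake, so a GSP memory left asleep by other means still gets powered back up. A condensed model of the new guard (names simplified):

#include <stdbool.h>
#include <stdio.h>

/* Skip the power-up only when low power is off AND the memory is
 * already awake (pwr_state == 0); otherwise proceed and wake it.
 */
static bool should_skip_poweron(bool lp_enabled, unsigned int pwr_state)
{
	return !lp_enabled && pwr_state == 0;
}

int main(void)
{
	printf("%d\n", should_skip_poweron(false, 0)); /* 1: nothing to do */
	printf("%d\n", should_skip_poweron(false, 2)); /* 0: must wake it */
	return 0;
}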
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
index 0e76eabce441..609e58dbd056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN31_VPG_H__
#define __DAL_DCN31_VPG_H__
+#include "vpg.h"
#define DCN31_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn31_vpg, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 5314770fff1c..a58c37165f5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -11,7 +11,7 @@
# Makefile for dcn32.
DCN32 = dcn32_hubbub.o dcn32_dccg.o \
- dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+ dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \
dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d761b0df2878..d9ff95cd2dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -34,6 +34,7 @@
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"
#ifndef MIN
@@ -61,6 +62,38 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
void enc32_hw_init(struct link_encoder *enc)
{
@@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t dp_alt_mode_disable = 0;
- bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- /* if value == 1 alt mode is disabled, otherwise it is enabled */
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
- }
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
- return is_usb_c_alt_mode;
+ return true;
}
-static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+}
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t is_in_usb_c_dp4_mode = 0;
+ union dmub_rb_cmd cmd;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- if (!is_in_usb_c_dp4_mode)
- link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
- }
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
+
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -203,10 +248,10 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
-
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
+
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
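Note: USB-C DP-alt status is now owned by DMUB firmware. Instead of reading RDPCSPIPE_PHY_CNTL6 directly, the encoder sends DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT and inspects the reply; in a two-lane (non-DP4) pin assignment, the advertised capability is capped at two lanes. A self-contained model of that clamp (struct and names illustrative; the real reply arrives in dmub_rb_cmd):

#include <stdint.h>
#include <stdio.h>

struct alt_status {
	int is_usb;
	int is_dp4;
};

/* In a two-lane USB-C pin assignment only two lanes reach the
 * connector, so cap the advertised lane count, as above.
 */
static uint32_t clamp_lane_count(struct alt_status s, uint32_t lanes)
{
	if (s.is_usb && !s.is_dp4)
		return lanes < 2 ? lanes : 2;	/* MIN(LANE_COUNT_TWO, lanes) */
	return lanes;
}

int main(void)
{
	struct alt_status s = { .is_usb = 1, .is_dp4 = 0 };

	printf("%u\n", clamp_lane_count(s, 4));	/* 2 */
	return 0;
}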
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index bbcfce06bec0..35d23d9db45e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -26,15 +26,7 @@
#ifndef __DC_LINK_ENCODER__DCN32_H__
#define __DC_LINK_ENCODER__DCN32_H__
-#include "dcn31/dcn31_dio_link_encoder.h"
-
-#define LE_DCN32_REG_LIST(id)\
- LE_DCN31_REG_LIST(id),\
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-#define LINK_ENCODER_MASK_SH_LIST_DCN32(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh),\
- LE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+#include "dcn30/dcn30_dio_link_encoder.h"
void dcn32_link_encoder_construct(
struct dcn20_link_encoder *enc20,
@@ -53,4 +45,9 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 1be5410cce97..ca53d39561d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -177,11 +177,12 @@
SE_SF(DIG0_DIG_FE_CNTL, DIG_SYMCLK_FE_ON, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh)
+
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 88dfc907553d..515c4c2b4c21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -167,7 +167,7 @@ static uint32_t convert_and_clamp(
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks(
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks(
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks(
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
static bool hubbub32_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index f073839a4b6d..e439ba0fa30f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -118,25 +118,25 @@
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index e789e654c387..e408e859b355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
+ mpc3_mpc_init(mpc);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc32_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc32_program_shaper,
.program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154..fbcd6f7bc993 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -35,25 +35,6 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
-
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes)
-{
- uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
-
- /* add 2 lines for worst case alignment */
- cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
-
- total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- return num_ways;
-}
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -112,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(
if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
if (dc->debug.force_subvp_num_ways) {
return dc->debug.force_subvp_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
} else {
- return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
+ return 0;
}
} else {
return 0;
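Note: dcn32_helper_mall_bytes_to_ways() is retired in favor of a per-ASIC res_pool->funcs->calculate_mall_ways_from_bytes() hook, so each family can apply its own cache geometry. For reference, the removed helper's arithmetic was: bytes to cache lines (plus two lines of alignment slack), then a round-up division by lines per way. A self-contained restatement:

#include <stdint.h>
#include <stdio.h>

static uint32_t mall_bytes_to_ways(uint32_t bytes, uint32_t line_size,
				   uint32_t max_cab_bytes, uint32_t num_ways)
{
	uint32_t lines_used = bytes / line_size + 2;	/* worst-case align */
	uint32_t total_lines = max_cab_bytes / line_size;
	uint32_t lines_per_way = total_lines / num_ways;
	uint32_t ways = lines_used / lines_per_way;

	if (lines_used % lines_per_way)
		ways++;					/* round up */
	return ways;
}

int main(void)
{
	/* e.g. 1 MiB of MALL use, 64 B lines, 32 MiB CAB, 16 ways -> 1 */
	printf("%u\n", mall_bytes_to_ways(1 << 20, 64, 32 << 20, 16));
	return 0;
}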
@@ -399,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -782,3 +765,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
pipe_cnt++;
}
}
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 13be5f06d987..05783daa62ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -127,11 +127,6 @@ void dcn321_link_encoder_construct(
* while doing the DP sink detect
*/
-/* if (dal_adapter_service_is_feature_supported(as,
- FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
- enc10->base.features.flags.bits.
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/
-
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
@@ -191,7 +186,6 @@ void dcn321_link_encoder_construct(
__func__,
result);
}
- if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ if (enc10->base.ctx->dc->debug.hdmi20_disable)
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index 0e317e0c36a0..d5b4533d2f62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -13,7 +13,7 @@
DCN35 = dcn35_dio_stream_encoder.o \
dcn35_dio_link_encoder.o dcn35_dccg.o \
dcn35_hubp.o dcn35_hubbub.o \
- dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
+ dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o
AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index f1ba7bb792ea..58dd3c5bbff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
@@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
+ uint32_t dpp_inst, uint32_t enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ return;
+
+ switch (dpp_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ break;
+ default:
+ break;
+ }
+}
+
static void dccg35_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -333,21 +367,67 @@ static void dccg35_set_dpstreamclk(
/* enabled to select one of the DTBCLKs for pipe */
switch (dp_hpo_inst) {
case 0:
- REG_UPDATE_2(DPSTREAMCLK_CNTL,
- DPSTREAMCLK0_EN,
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+
+static void dccg35_set_dpstreamclk_root_clock_gating(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (dp_hpo_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 1 : 0);
+ }
break;
default:
BREAK_TO_DEBUGGER();
@@ -355,6 +435,8 @@ static void dccg35_set_dpstreamclk(
}
}
+
+
static void dccg35_set_physymclk_root_clock_gating(
struct dccg *dccg,
int phy_inst,
@@ -369,22 +451,32 @@ static void dccg35_set_physymclk_root_clock_gating(
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -407,10 +499,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 1,
PHYASYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 0,
PHYASYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 1:
@@ -418,10 +516,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 1,
PHYBSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 0,
PHYBSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 2:
@@ -429,10 +533,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 1,
PHYCSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 0,
PHYCSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 3:
@@ -440,10 +550,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 1,
PHYDSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 0,
PHYDSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 4:
@@ -451,10 +567,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 1,
PHYESYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 0,
PHYESYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
default:
@@ -491,12 +613,12 @@ static void dccg35_dpp_root_clock_control(
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
- dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0xFF,
DPPCLK0_DTO_MODULO, 0xFF);
} else {
- dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
/* turn on the DTO to generate a 0hz clock */
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
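Note: this hunk fixes inverted enable arguments: with clock_on the DPP clock must be enabled with the DTO parked at phase = modulo = 0xFF (a 1:1 ratio), while gating disables the clock and programs the DTO with phase 0 to emit 0 Hz. A small self-contained model of the DTO ratio behind that trick:

#include <stdint.h>
#include <stdio.h>

/* Output clock is roughly refclk * phase / modulo. */
static unsigned int dto_out_khz(unsigned int refclk_khz,
				unsigned int phase, unsigned int modulo)
{
	return modulo ? (unsigned int)((uint64_t)refclk_khz * phase / modulo) : 0;
}

int main(void)
{
	printf("%u\n", dto_out_khz(600000, 0xFF, 0xFF)); /* on: 600000 */
	printf("%u\n", dto_out_khz(600000, 0x00, 0xFF)); /* gated: 0 */
	return 0;
}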
@@ -575,18 +697,32 @@ void dccg35_init(struct dccg *dccg)
dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- for (otg_inst = 0; otg_inst < 2; otg_inst++)
+ for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ }
+
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// for (otg_inst = 0; otg_inst < 4; otg_inst++)
+// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst);
+
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
- for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst,
+ for (otg_inst = 0; otg_inst < 4; otg_inst++) {
+ dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
+ dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ }
if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
for (otg_inst = 0; otg_inst < 5; otg_inst++)
dccg35_set_physymclk_root_clock_gating(dccg, otg_inst,
false);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ for (otg_inst = 0; otg_inst < 4; otg_inst++)
+ dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0);
+
/*
dccg35_enable_global_fgcg_rep(
dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -611,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -650,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0);
break;
default:
return;
@@ -682,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1);
break;
}
@@ -706,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 1,
SYMCLKA_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 1,
SYMCLKB_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 1,
SYMCLKC_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 1,
SYMCLKD_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 1,
SYMCLKE_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1);
break;
}
}
@@ -786,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 0,
SYMCLKA_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 0,
SYMCLKB_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 0,
SYMCLKC_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 0,
SYMCLKD_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 0,
SYMCLKE_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0);
break;
}
@@ -818,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0);
break;
}
}
@@ -845,6 +1037,7 @@ static const struct dccg_funcs dccg35_funcs = {
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
+ .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
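Note: most hunks in this file follow one pattern: wherever a functional clock is switched, the matching *_ROOT_GATE_DISABLE override in DCCG_GATE_DISABLE_CNTL5/6 is now flipped too, guarded by the corresponding root_clock_optimization debug bit. A hedged distillation of that pattern, using made-up state instead of the real registers:

#include <stdbool.h>
#include <stdio.h>

struct fake_dccg {
	bool root_clock_opt;	/* stands in for root_clock_optimization.bits.* */
	unsigned int clock_en;
	unsigned int root_gate_disable;
};

static void set_clk_with_root_gate(struct fake_dccg *d, bool on)
{
	d->clock_en = on ? 1 : 0;
	if (d->root_clock_opt)
		d->root_gate_disable = on ? 1 : 0; /* ungate with the clock */
}

int main(void)
{
	struct fake_dccg d = { .root_clock_opt = true };

	set_clk_with_root_gate(&d, true);
	printf("%u %u\n", d.clock_en, d.root_gate_disable); /* 1 1 */
	return 0;
}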
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..20f810a6646c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn35_link_encoder_setup(
@@ -119,7 +118,7 @@ void dcn35_link_encoder_setup(
void dcn35_link_encoder_init(struct link_encoder *enc)
{
- enc32_hw_init(enc);
+ enc31_hw_init(enc);
dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio);
}
@@ -184,6 +183,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +239,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
index e1e560732a9d..d546a3676304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
@@ -37,7 +37,9 @@
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
@@ -114,7 +116,15 @@
LE_SF(DIO_CLK_CNTL, SYMCLK_FE_G_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_R_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_G_GATE_DIS, mask_sh),\
- LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh)
+ LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, DISPCLK_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKA_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKB_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKC_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKD_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKE_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKF_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKG_G_HDCP_GATE_DIS, mask_sh)
void dcn35_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
index 339bf0c722dd..6293173ba2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
@@ -111,7 +111,7 @@ static uint32_t convert_and_clamp(
static bool hubbub35_program_stutter_z8_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub35_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 4229369c57f4..f4d3f04ec857 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -26,6 +26,9 @@
#ifndef DM_CP_PSP_IF__H
#define DM_CP_PSP_IF__H
+/*
+ * Interface to CPLIB/PSP to enable ASSR
+ */
struct dc_link;
struct cp_psp_stream_config {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 6d7a15dcf8a7..34adae7ab6e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -36,6 +36,7 @@
struct dc_dp_mst_stream_allocation_table;
struct aux_payload;
enum aux_return_code_type;
+enum set_config_status;
/*
* Allocate memory accessible by the GPU
@@ -200,7 +201,7 @@ int dm_helper_dmub_aux_transfer_sync(
const struct dc_link *link,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
-enum set_config_status;
+
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index d0eed3b4771e..9405c47ee2a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -275,6 +275,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
#define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX)
/*
+ * SMU message tracing
+ */
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
+
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
+#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+
+
+/*
* DMUB Interfaces
*/
bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
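Note: the new TRACE_SMU_MSG/TRACE_SMU_DELAY hooks give the DM layer visibility into SMU mailbox traffic. A self-contained usage sketch of where the two trace points sit around an exchange; the macros and poll helper below are hypothetical stand-ins for the real ones, kept here only to make the sketch compile:

#include <stdint.h>
#include <stdio.h>

#define TRACE_SMU_MSG(id, param, ctx)	printf("smu msg %u param %u\n", id, param)
#define TRACE_SMU_DELAY(us, ctx)	printf("smu delay %u us\n", us)

static uint32_t wait_for_smu_response(uint32_t *delay_us)
{
	*delay_us = 37;		/* pretend the SMU answered after 37 us */
	return 1;		/* OK */
}

static uint32_t smu_send_msg_traced(void *ctx, uint32_t msg_id, uint32_t param)
{
	uint32_t delay_us = 0, resp;

	TRACE_SMU_MSG(msg_id, param, ctx);	/* request side */
	resp = wait_for_smu_response(&delay_us);
	TRACE_SMU_DELAY(delay_us, ctx);		/* observed latency */
	return resp;
}

int main(void)
{
	return smu_send_msg_traced(NULL, 0x3, 0) == 1 ? 0 : 1;
}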
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 59ade76ffb18..c4a5efd2dda5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -92,6 +92,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -126,6 +127,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_rcflags)
@@ -157,6 +159,7 @@ DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
+DML += dcn351/dcn351_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 38ab9ad60ef8..74da9ebda016 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1085,6 +1085,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
@@ -1092,8 +1095,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
- return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else if (is_pwrseq0 && (is_psr || is_replay))
+ return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else {
@@ -2369,7 +2372,7 @@ validate_out:
static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{
- struct _vcs_dpi_voltage_scaling_st low_pstate_lvl;
+ struct _vcs_dpi_voltage_scaling_st low_pstate_lvl = {0};
int i;
low_pstate_lvl.state = 1;
@@ -2474,7 +2477,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
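This hunk widens the eDP power-sequencer-0 case from PSR1-only to PSR1, PSR-SU, or Panel Replay, and drops the allow_z8 ternary so those panels always land in the Z8+Z10-only class. The resulting decision ladder, condensed from the lines above:

if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
	return DCN_ZSTATE_SUPPORT_ALLOW;		/* long stutter period: deep Z-states OK */
else if (is_pwrseq0 && (is_psr || is_replay))
	return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;	/* self-refresh-capable panel */
else
	return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY
			: DCN_ZSTATE_SUPPORT_DISALLOW;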
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index ccb4ad78f667..81f7b90849ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -260,7 +260,7 @@ void dcn30_fpu_populate_dml_writeback_from_context(
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 63c48c29ba49..e0b52db2c210 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3535,7 +3535,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
@@ -4273,7 +4272,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
@@ -4576,7 +4575,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Return BW
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k) {
@@ -4635,7 +4634,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
@@ -4646,7 +4645,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
@@ -4656,7 +4655,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
v->ReturnBusWidth * v->DCFCLKState[i][j],
@@ -4674,7 +4673,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Re-ordering Buffer Support Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
@@ -4692,7 +4691,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
@@ -4708,7 +4707,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Prefetch Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
int NextPrefetchModeState = MinPrefetchMode;
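Every per-state sweep in dml30_ModeSupportAndSystemConfigurationFull now begins at start_state instead of 0, so voltage states below the floor chosen earlier in the function (start_state is established in a part of the patch not shown here) are skipped consistently by every loop. The shape of the change, with evaluate_state as a hypothetical stand-in for each loop body:

for (i = start_state; i < mode_lib->soc.num_states; ++i)	/* was: i = 0 */
	for (j = 0; j <= 1; ++j)
		evaluate_state(i, j);				/* placeholder for the per-state body */

The companion TruncToValidBPP hunk deletes a trailing "return BPP_INVALID;" that became unreachable once every branch of the function returns a value.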
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index 3eb3a021ab7d..3f02bb806d42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * This is required for dcn303 because it just so happens that the memory
+ * bandwidth is low enough such that all the optimal DCFCLK for each UCLK
+ * is lower than the smallest DCFCLK STA target. In this case we need to
+ * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
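The new else branch handles the case where even the fastest UCLK's optimal DCFCLK sits below the smallest DCFCLK STA target, so the break above never fires and the entry would otherwise stay unset. A hedged illustration with invented values:

/* Suppose the STA target is 694 MHz but every optimal DCFCLK is lower: */
unsigned int sta_target = 694;					/* hypothetical */
unsigned int optimal_dcfclk_for_uclk[] = { 504, 600, 648 };	/* all < 694  */
/* The inner loop never breaks; on j == num_uclk_states - 1 the fallback
 * pins optimal_uclk_for_dcfclk_sta_targets[i] to entries[2].memclk_mhz * 16. */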
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index deb6d162a2d5..94317b2e4a85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
.num_chans = 4,
.dummy_pstate_latency_us = 10.0
};
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2500.0,
};
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
@@ -485,6 +487,7 @@ void dcn31_calculate_wm_and_dlg_fp(
{
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ uint32_t cstate_enter_plus_exit_z8_ns;
dc_assert_fp_enabled();
@@ -504,6 +507,13 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ cstate_enter_plus_exit_z8_ns =
+ get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000;
+
/* Set A:
* All clocks min required
*
@@ -514,7 +524,7 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
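The added clamp raises the Z8 enter-plus-exit watermark to the configured minimum residency whenever both the stutter period and the computed watermark fall below it, discouraging Z8 entry when the hardware could not stay resident long enough to profit. A hedged numeric walk-through (values invented):

/* minimum_z8_residency_time = 1000 us, stutter period = 800 us,
 * computed enter+exit = 600,000 ns (600 us):                     */
uint32_t min_us = 1000;
uint32_t wm_ns  = 600000;
if (800 < min_us && wm_ns < min_us * 1000)
	wm_ns = min_us * 1000;	/* both tests pass -> watermark becomes 1,000,000 ns */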
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 8f9c8faed260..d2ae43a82ba5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -30,6 +30,7 @@
#define DCN3_15_DEFAULT_DET_SIZE 192
#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_DEFAULT_DET_SIZE 192
+#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index adea459e7d36..33cf824c5da1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3679,7 +3679,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index fb21572750e8..21f637ae4add 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 88e56889a68c..3242957d00c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3788,7 +3788,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a0a65e099104..f6fe0a64beac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -180,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
+static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context,
+ bool *repopulate_pipes, int *split, bool *merge);
+
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
{
/* defaults */
@@ -622,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
@@ -720,7 +723,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
- struct pipe_ctx *subvp_pipes[2];
+ struct pipe_ctx *subvp_pipes[2] = {0};
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
@@ -1425,13 +1428,14 @@ static bool is_test_pattern_enabled(
return false;
}
-static void dcn32_full_validate_bw_helper(struct dc *dc,
+static bool dcn32_full_validate_bw_helper(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *vlevel,
int *split,
bool *merge,
- int *pipe_cnt)
+ int *pipe_cnt,
+ bool *repopulate_pipes)
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
@@ -1461,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
}
+ /* Apply split and merge flags before checking for subvp */
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge))
+ return false;
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
* 2. Full update (i.e. !fast_validate)
@@ -1475,19 +1485,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
- dcn32_merge_pipes_for_subvp(dc, context);
- memset(merge, 0, MAX_PIPES * sizeof(bool));
-
vlevel_temp = *vlevel;
- /* to re-initialize viewport after the pipe merge */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state || !pipe_ctx->stream)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1576,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* add phantom pipes. If pipe split (ODM / MPC) is required, both the main
* and phantom pipes will be split in the regular pipe splitting sequence.
*/
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
@@ -1590,6 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
+ return true;
}
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -1929,106 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm(
return true;
}
-bool dcn32_internal_validate_bw(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
+static bool dcn32_apply_merge_split_flags_helper(
+ struct dc *dc,
+ struct dc_state *context,
+ bool *repopulate_pipes,
+ int *split,
+ bool *merge)
{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
+ int i, pipe_idx;
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx;
- int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
- dc_assert_fp_enabled();
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dc_state_remove_phantom_streams_and_planes(dc, context);
- dc_state_release_phantom_streams_and_planes(dc, context);
-
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
-
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
-
- if (!fast_validate)
- dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
-
- if (fast_validate ||
- (dc->debug.dml_disallow_alternate_prefetch_modes &&
- (vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
- /*
- * If dml_disallow_alternate_prefetch_modes is false, then we have already
- * tried alternate prefetch modes during full validation.
- *
- * If mode is unsupported or there is no p-state support, then
- * fall back to favouring voltage.
- *
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
- * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
- */
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_none;
-
- context->bw_ctx.dml.validate_max_state = fast_validate;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.validate_max_state = false;
-
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
- vba->VoltageLevel = vlevel;
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && !dc->config.enable_windowed_mpo_odm
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_state->clip_rect,
- &pipe->stream->src,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
if (dc->config.enable_windowed_mpo_odm) {
- repopulate_pipes = update_pipes_with_split_flags(
- dc, context, vba, split, merge);
+ if (update_pipes_with_split_flags(
+ dc, context, vba, split, merge))
+ *repopulate_pipes = true;
} else {
+
/* the code below will be removed once windowed mpo odm is fully
* enabled.
*/
@@ -2085,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2101,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -2140,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc,
hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe, odm))
- goto validate_fail;
+ return false;
newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
+ *repopulate_pipes = true;
}
if (split[i] == 4) {
struct pipe_ctx *pipe_4to1;
@@ -2163,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
@@ -2182,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
}
if (odm)
@@ -2198,11 +2112,122 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->plane_state) {
if (!resource_build_scaling_params(pipe))
- goto validate_fail;
+ return false;
}
}
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
+ context->streams[i]);
+
+ if (otg_master)
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+ }
+ }
+ return true;
+}
+
+bool dcn32_internal_validate_bw(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ dc_assert_fp_enabled();
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ /* For each full update, remove all existing phantom pipes first */
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
+
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
+
+ if (!fast_validate) {
+ if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
+ &pipe_cnt, &repopulate_pipes))
+ goto validate_fail;
+ }
+
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
+ /*
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favouring voltage.
+ *
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ */
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_none;
+
+ context->bw_ctx.dml.validate_max_state = fast_validate;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ context->bw_ctx.dml.validate_max_state = false;
+
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */
+ vba->VoltageLevel = vlevel;
+ }
}
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && !dc->config.enable_windowed_mpo_odm
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge))
+ goto validate_fail;
+
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
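The refactor pulls the pipe merge/split application out of dcn32_internal_validate_bw into dcn32_apply_merge_split_flags_helper(), which reports failure through its return value instead of a validate_fail label, so both the full-validation path and the tail of the main function can share it. The resulting call flow, condensed to a sketch (elisions marked with comments):

bool dcn32_internal_validate_bw(/* ... */)
{
	/* ... populate pipes, log DML parameters ... */
	if (!fast_validate &&
	    !dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split,
					   merge, &pipe_cnt, &repopulate_pipes))
		goto validate_fail;	/* helper applies split/merge before SubVP setup */
	/* ... voltage fallback, windowed MPO/ODM checks ... */
	if (!dcn32_apply_merge_split_flags_helper(dc, context,
						  &repopulate_pipes, split, merge))
		goto validate_fail;	/* replaces the old inline merge/split body */
	/* ... DSC validation, watermark calculation ... */
}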
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 80fccd4999a5..ba1310c8fd77 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP(
MaxLinkBPP = 2 * MaxLinkBPP;
}
+ *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP)
@@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP(
else
return DesiredBPP;
}
-
- *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
-
- return BPP_INVALID;
} // TruncToValidBPP
double dml32_RequiredDTBCLK(
@@ -1975,8 +1973,8 @@ void dml32_CalculateVMRowAndSwath(
unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX] = {0};
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX] = {0};
unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
@@ -4291,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int i, j, k;
unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
unsigned int DRAMClockChangeSupportNumber = 0;
- unsigned int LastSurfaceWithoutMargin;
+ unsigned int LastSurfaceWithoutMargin = 0;
unsigned int DRAMClockChangeMethod = 0;
bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
double MinActiveFCLKChangeMargin = 0.;
@@ -5656,9 +5654,9 @@ void dml32_CalculateStutterEfficiency(
double LastZ8StutterPeriod = 0.0;
double LastStutterPeriod = 0.0;
unsigned int TotalNumberOfActiveOTG = 0;
- double doublePixelClock;
- unsigned int doubleHTotal;
- unsigned int doubleVTotal;
+ double doublePixelClock = 0;
+ unsigned int doubleHTotal = 0;
+ unsigned int doubleVTotal = 0;
bool SameTiming = true;
double DETBufferingTimeY;
double SwathWidthYCriticalSurface = 0.0;
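Moving the *RequiredSlots store ahead of the branch chain guarantees the out-parameter is written on every call; in the old layout it sat below returns taken on every path, so it was dead code and callers read an unwritten value. The resulting shape, with the branch bodies elided:

*RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);	/* now always written */
if (DesiredBPP == 0) {
	/* DSC / non-DSC truncation; every path returns */
} else {
	/* returns DesiredBPP or BPP_INVALID; every path returns */
}
/* nothing below is reachable, so the trailing "return BPP_INVALID;" was dropped */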
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 7ea2bd5374d5..60f251cf973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.num_states = 5,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 0,
+ .do_urgent_latency_adjustment = 1,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -439,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
@@ -577,18 +577,21 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
{
enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
unsigned int i, plane_count = 0;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
- if (plane_count == 0) {
+ if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW;
- } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
bool is_pwrseq0 = link && link->link_index == 0;
- bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
int minmum_z8_residency =
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
@@ -596,13 +599,18 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
+ /* For PSR1/PSR-SU we allow Z8 and Z10 based on latency; for Replay with IPS enabled, the panel enters IPS2. */
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && is_psr1)
- support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else if (is_pwrseq0 && (is_psr || is_replay))
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+
}
+ DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support,
+ (int)context->bw_ctx.dml.vba.StutterPeriod);
+
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
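The new DC_LOG_SMU call surfaces the chosen z-state class next to the DML stutter period on every recomputation. Going by the format string in this hunk, an emitted line would look roughly like the following (values invented; %d prints the enum's integer value):

zstate_support: 2, StutterPeriod: 1432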
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
new file mode 100644
index 000000000000..e4f333d4fb54
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#include "resource.h"
+#include "dcn351_fpu.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
+#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dml_inline_defs.h"
+
+#include "link.h"
+
+#define DC_LOGGER_INIT(logger)
+
+struct _vcs_dpi_ip_params_st dcn3_51_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 1536,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,/*not used*/
+ .opp_output_buffer_lines = 1,/*not used*/
+ .pixel_chunk_size_kbytes = 8,
+ //.alpha_pixel_chunk_size_kbytes = 4;/*new*/
+ //.min_pixel_chunk_size_bytes = 1024;/*new*/
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 12,/*delta from 10*/
+ .dsc422_native_support = true,/*delta from false*/
+ .is_line_buffer_bpp_fixed = true,/*new*/
+ .line_buffer_fixed_bpp = 32,/*delta from 48*/
+ .line_buffer_size_bits = 986880,/*delta from 789504*/
+ .max_line_buffer_lines = 32,/*delta from 12*/
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ /*.max_num_hdmi_frl_outputs = 1; new in dml2*/
+ /*.max_num_dp2p0_outputs = 2; new in dml2*/
+ /*.max_num_dp2p0_streams = 4; new in dml2*/
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/
+ .dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/
+ /*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/
+ .dispclk_ramp_margin_percent = 1.11,/*delta from 1*/
+ /*.dppclk_delay_subtotal = 47;
+ .dppclk_delay_scl = 50;
+ .dppclk_delay_scl_lb_only = 16;
+ .dppclk_delay_cnvc_formatter = 28;
+ .dppclk_delay_cnvc_cursor = 6;
+ .dispclk_delay_subtotal = 125;*/ /*new to dml2*/
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 47, /* changed from 46,*/
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 125, /*changed from 119,*/
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+// .config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/
+
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 200.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 320.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 8,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
+ .fclk_change_latency_us = 24.0,
+ .usr_retraining_latency_us = 2,
+ .writeback_latency_us = 12.0,
+
+ .dram_channel_width_bytes = 4,/*not exist in dml2*/
+ .round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .dram_clock_change_latency_us = 11.72,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = 0,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
+};
+
+/*
+ * dcn351_update_bw_bounding_box_fpu
+ *
+ * This overrides some dcn3_51 ip_or_soc initial parameters hardcoded from the
+ * spreadsheet with actual values as per the dGPU SKU:
+ * - with passed few options from dc->config
+ * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
+ * need to get it from PM FW)
+ * - with passed latency values (in ns units) in dc->bb_overrides for
+ * debugging purposes
+ * - with passed latencies from VBIOS (in 100_ns units) if available for
+ * certain dGPU SKU
+ * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
+ * of the same ASIC)
+ * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
+ * FW for different clocks (which might differ for certain dGPU SKU of the
+ * same ASIC)
+ */
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params)
+{
+ unsigned int i, closest_clk_lvl;
+ int j;
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dc->scratch.update_bw_bounding_box.clock_limits;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dc_assert_fp_enabled();
+
+ dcn3_51_ip.max_num_otg =
+ dc->res_pool->res_cap->num_timing_generator;
+ dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_51_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1;
+ j >= 0; j--) {
+ if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_51_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz <
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+
+ clock_limits[i].fabricclk_mhz =
+ clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz =
+ clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz &&
+ clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts =
+ clk_table->entries[i].memclk_mhz * 2 *
+ clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
+ max_dispclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
+ max_dppclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ memcpy(dcn3_51_soc.clock_limits, clock_limits,
+ sizeof(dcn3_51_soc.clock_limits));
+
+ if (clk_table->num_entries)
+ dcn3_51_soc.num_states = clk_table->num_entries;
+
+ if (max_dispclk_mhz) {
+ dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+ if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
+ /* Temporary until dml2 fully works without dml1. */
+ dml_init_instance(&dc->dml, &dcn3_51_soc, &dcn3_51_ip,
+ DML_PROJECT_DCN31);
+
+ /*copy to dml2, before dml2_create*/
+ if (clk_table->num_entries > 2) {
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dc->dml2_options.bbox_overrides.clks_table.num_states =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+ clock_limits[i].dcfclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+ clock_limits[i].fabricclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+ clock_limits[i].dispclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+ clock_limits[i].dppclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+ clock_limits[i].socclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
+ }
+ }
+
+ /* Update latency values */
+ dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us;
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+/*
+ * micro_sec_to_vert_lines() - convert a time to a number of vertical lines for a given timing
+ *
+ * @param: num_us: number of microseconds
+ * @return: number of vertical lines. If the time does not map to a whole number of
+ * lines, the result is rounded up to the next line count so that num_us is guaranteed.
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
+
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe = 0;
+ bool upscaled = false;
+ const unsigned int max_allowed_vblank_nom = 1023;
+
+ dcn31_populate_dml_pipes_from_context(dc, context, pipes,
+ fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing);
+ v_back_porch = get_vertical_back_porch(timing);
+
+ if (pipe->stream->adjust.v_total_max ==
+ pipe->stream->adjust.v_total_min &&
+ pipe->stream->adjust.v_total_min > timing->v_total) {
+ pipes[pipe_cnt].pipe.dest.vtotal =
+ pipe->stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total -
+ pipes[pipe_cnt].pipe.dest.vactive;
+ }
+
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
+ // vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
+ // The + 2 is because:
+ // 1 -> VStartup_start should be 1 line before VSync
+ // 1 -> always reserve 1 line between the start of vblank and the vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height <
+ pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width <
+ pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the
+ * plane. We need to require support for immediate flip or
+ * underflow can be intermittently experienced depending on peak
+ * b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ DC_FP_END();
+
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
+ dc->config.enable_4to1MPC = false;
+
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 &&
+ pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) &&
+ pipe->plane_state->src_rect.width <= 5120) {
+ /*
+ * Limit to 5k max to avoid forced pipe split when there
+ * is not enough detile for swath
+ */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >=
+ dc->debug.crb_alloc_policy_min_disp_count &&
+ dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP &&
+ dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode ==
+ dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy =
+ dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ unsigned int i, plane_count = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /*dcn351 does not support z9/z10*/
+ if (context->stream_count == 0 || plane_count == 0) {
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ bool is_pwrseq0 = link && link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+ int minmum_z8_residency =
+ dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+
+ /* For PSR1/PSR-SU we allow Z8 based on latency (dcn351 has no Z9/Z10); for Replay with IPS enabled, the panel enters IPS2. */
+ if (is_pwrseq0 && (is_psr || is_replay))
+ support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+ }
+ context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
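For the vblank_nom handling above, a worked example of micro_sec_to_vert_lines() under a standard 1080p60 CEA timing (h_total = 2200, pix_clk_100hz = 1485000) with this file's VBlankNomDefaultUS of 668; the timing values are an assumption for illustration:

/* lines_time_in_ns = 1000.0 * (2200 * 1000.0) / (1485000 / 10.0)
 *                  ~= 14814.8 ns per line
 * num_lines = dml_ceil(1000.0 * 668 / 14814.8, 1.0) = dml_ceil(45.09, 1.0) = 46
 * so vblank_nom is capped at 46 lines for this timing, before the
 * v_sync_width + v_back_porch + 2 floor and the 1023 ceiling apply. */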
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
new file mode 100644
index 000000000000..f93efab9a668
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DCN351_FPU_H__
+#define __DCN351_FPU_H__
+
+#include "clk_mgr.h"
+
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params);
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index acff3449b8d7..1c9498a72520 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -67,6 +67,7 @@ frame_warn_flag := -Wframe-larger-than=2048
endif
endif
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 9be5ebf3a8c0..3e919f5c00ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -31,6 +31,8 @@
#include "dml_assert.h"
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
+#define TB_BORROWED_MAX 400
+
// ---------------------------
// Declaration Begins
// ---------------------------
@@ -2782,6 +2784,8 @@ static dml_float_t TruncToValidBPP(
}
}
+ *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
@@ -2810,10 +2814,6 @@ static dml_float_t TruncToValidBPP(
return DesiredBPP;
}
}
-
- *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
-
- return __DML_DPP_INVALID__;
} // TruncToValidBPP
static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
@@ -3790,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat
dml_bool_t FoundCriticalSurface = false;
dml_uint_t TotalNumberOfActiveOTG = 0;
- dml_float_t SinglePixelClock;
- dml_uint_t SingleHTotal;
- dml_uint_t SingleVTotal;
+ dml_float_t SinglePixelClock = 0;
+ dml_uint_t SingleHTotal = 0;
+ dml_uint_t SingleVTotal = 0;
dml_bool_t SameTiming = true;
dml_float_t LastStutterPeriod = 0.0;
@@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
/* Copy the calculated watermarks to mp.Watermark as the getter functions are
* implemented by the DML team to copy the calculated values from the mp.Watermark interface.
+ * &mode_lib->mp.Watermark and &locals->Watermark are the same address; memcpy on
+ * overlapping regions is undefined behavior, so memmove should be used.
*/
- memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
+ memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) {
@@ -10214,6 +10216,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m
dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency);
dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep);
dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
+dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark);
dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency);
dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank);
dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency);
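For context on the memcpy-to-memmove change above: when source and destination overlap (here they are literally the same object), memcpy is undefined behavior while memmove is specified to work. A self-contained toy example, unrelated to dc itself:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char buf[16] = "watermarks";

    	memmove(buf, buf, sizeof(buf)); /* full self-copy: well-defined no-op */
    	memmove(buf + 1, buf, 9);       /* overlapping shift: also well defined */
    	printf("%s\n", buf);            /* prints "wwatermark" */
    	return 0;
    }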
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
index 8452485684f5..3116b88e99dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
@@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t);
dml_get_var_decl(urgent_latency, dml_float_t);
dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t);
+dml_get_var_decl(wm_writeback_urgent, dml_float_t);
dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t);
dml_get_var_decl(stutter_efficiency, dml_float_t);
dml_get_var_decl(stutter_efficiency_z8, dml_float_t);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
index de63364be01d..14d389525296 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
@@ -41,6 +41,7 @@
#define DCN_DML__VM_PRESENT__1 1
#define DCN_DML__HOST_VM_PRESENT 1
#define DCN_DML__HOST_VM_PRESENT__1 1
+#define DCN_DML__DWB 1
#include "dml_depedencies.h"
@@ -59,6 +60,7 @@
#define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE
#define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR
#define __DML_DPP_INVALID__ 0
+#define __DML_NUM_DMB__ DCN_DML__DWB
#define __DML_PIPE_NO_PLANE__ 99
#define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 0baf39d64a2d..ad2a6b4769fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -88,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
@@ -100,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
// The master pipe of a stream is defined as the top pipe in odm slice 0
@@ -141,14 +143,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
{
int i;
unsigned int num_found = 0;
- unsigned int plane_id_assigned_to_pipe;
+ unsigned int plane_id_assigned_to_pipe = -1;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
- state->res_ctx.pipe_ctx[i].stream->stream_id,
- ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
- if (plane_id_assigned_to_pipe == plane_id)
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || !pipe->stream)
+ continue;
+
+ get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id,
+ ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx],
+ &plane_id_assigned_to_pipe);
+ if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe
+ && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) {
+ while (pipe) {
+ struct pipe_ctx *mpc_pipe = pipe;
+
+ while (mpc_pipe) {
+ pipes[num_found++] = mpc_pipe->pipe_idx;
+ mpc_pipe = mpc_pipe->bottom_pipe;
+ if (!mpc_pipe)
+ break;
+ if (mpc_pipe->plane_state != pipe->plane_state)
+ mpc_pipe = NULL;
+ }
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
@@ -566,8 +587,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru
unsigned int num_found = 0;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ while (pipe) {
+ pipes[num_found++] = pipe->pipe_idx;
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
@@ -768,8 +795,8 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index);
}
-static unsigned int get_mpc_factor(struct dml2_context *ctx,
- const struct dc_state *state,
+static unsigned int get_target_mpc_factor(struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_status *status,
@@ -780,10 +807,10 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
unsigned int cfg_idx;
unsigned int mpc_factor;
- get_plane_id(ctx, state, status->plane_states[plane_idx],
- stream->stream_id, plane_idx, &plane_id);
- cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
if (ctx->architecture == dml2_architecture_20) {
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
+ cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
} else {
mpc_factor = 1;
@@ -797,16 +824,18 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
return mpc_factor;
}
-static unsigned int get_odm_factor(
+static unsigned int get_target_odm_factor(
const struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_state *stream)
{
- unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id(
- mapping, stream->stream_id);
+ unsigned int cfg_idx;
- if (ctx->architecture == dml2_architecture_20)
+ if (ctx->architecture == dml2_architecture_20) {
+ cfg_idx = find_disp_cfg_idx_by_stream_id(
+ mapping, stream->stream_id);
switch (disp_cfg->hw.ODMMode[cfg_idx]) {
case dml_odm_mode_bypass:
return 1;
@@ -817,83 +846,122 @@ static unsigned int get_odm_factor(
default:
break;
}
+ }
ASSERT(false);
return 1;
}
+static unsigned int get_source_odm_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream);
+
+ return ctx->config.callbacks.get_odm_slice_count(otg_master);
+}
+
+static unsigned int get_source_mpc_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_plane_state *plane)
+{
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
+ int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane,
+ &state->res_ctx, dpp_pipes);
+
+ ASSERT(dpp_pipe_count > 0);
+ return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]);
+}
+
static void populate_mpc_factors_for_stream(
struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
+ struct dc_state *state,
unsigned int stream_idx,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
int i;
- for (i = 0; i < status->plane_count; i++)
- if (odm_factor == 1)
- mpc_factors[i] = get_mpc_factor(
- ctx, state, disp_cfg, mapping, status,
- state->streams[stream_idx], i);
- else
- mpc_factors[i] = 1;
+ for (i = 0; i < status->plane_count; i++) {
+ mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]);
+ mpc_factors[i].target = (odm_factor.target == 1) ?
+ get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1;
+ }
}
static void populate_odm_factors(const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
- unsigned int odm_factors[MAX_PIPES])
+ struct dc_state *state,
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES])
{
int i;
- for (i = 0; i < state->stream_count; i++)
- odm_factors[i] = get_odm_factor(
- ctx, disp_cfg, mapping, state->streams[i]);
+ for (i = 0; i < state->stream_count; i++) {
+ odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]);
+ odm_factors[i].target = get_target_odm_factor(
+ ctx, state, disp_cfg, mapping, state->streams[i]);
+ }
}
-static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state,
const struct dc_state *existing_state,
const struct dc_stream_state *stream,
const struct dc_stream_status *status,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
int plane_idx;
bool result = true;
- if (odm_factor == 1)
- /*
- * ODM and MPC combines are by DML design mutually exclusive.
- * ODM factor of 1 means MPC factors may be greater than 1.
- * In this case, we want to set ODM factor to 1 first to free up
- * pipe resources from previous ODM configuration before setting
- * up MPC combine to acquire more pipe resources.
- */
+ for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
+ if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target < odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
+ return result;
+}
+
+static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_state *existing_state,
+ const struct dc_stream_state *stream,
+ const struct dc_stream_status *status,
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
+{
+ int plane_idx;
+ bool result = true;
+
for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
- result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
- state,
- existing_state,
- ctx->config.callbacks.dc->res_pool,
- status->plane_states[plane_idx],
- mpc_factors[plane_idx]);
- if (odm_factor > 1)
+ if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target > odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
return result;
}
@@ -903,20 +971,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *existing_state)
{
- unsigned int odm_factors[MAX_PIPES];
- unsigned int mpc_factors_for_stream[MAX_PIPES];
int i;
bool result = true;
- populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors);
- for (i = 0; i < state->stream_count; i++) {
+ populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors);
+ for (i = 0; i < state->stream_count; i++)
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
- i, odm_factors[i], mpc_factors_for_stream);
- result &= map_dc_pipes_for_stream(ctx, state, existing_state,
- state->streams[i],
- &state->stream_status[i],
- odm_factors[i], mpc_factors_for_stream);
- }
+ i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+
return result;
}
@@ -1012,6 +1080,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
ASSERT(false);
}
}
+
+ if (ctx->config.callbacks.build_test_pattern_params &&
+ pipe->stream &&
+ pipe->prev_odm_pipe == NULL &&
+ pipe->top_pipe == NULL)
+ ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe);
}
return true;
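The unmap/map split above generalizes the old "free before acquire" comment: every stream and plane is first shrunk toward its target slice count (releasing pipes) and only then grown (acquiring pipes), so ODM and MPC reconfigurations never compete for pipes that have not been freed yet. A hypothetical, self-contained sketch of the same two-phase pattern (all names here are illustrative):

    #include <stdbool.h>

    struct factor { unsigned int source, target; };

    /* set_count() stands in for the update_pipes_for_*_with_slice_count
     * callbacks; it returns false on failure. */
    static bool reallocate(struct factor *f, int n,
    		       bool (*set_count)(int idx, unsigned int count))
    {
    	bool ok = true;
    	int i;

    	for (i = 0; i < n; i++)         /* phase 1: release pipes */
    		if (f[i].target < f[i].source)
    			ok &= set_count(i, f[i].target);
    	for (i = 0; i < n; i++)         /* phase 2: acquire pipes */
    		if (f[i].target > f[i].source)
    			ok &= set_count(i, f[i].target);
    	return ok;
    }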
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
index 2f91244a7b01..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
@@ -30,6 +30,8 @@
#include "dml2_dc_types.h"
struct dml2_context;
+struct dml2_dml_to_dc_pipe_mapping;
+struct dml_display_cfg_st;
/*
* dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index 1cf8a884c0fb..9dab4e43c511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -109,10 +109,21 @@ enum dml2_architecture {
dml2_architecture_20,
};
+struct dml2_pipe_combine_factor {
+ unsigned int source;
+ unsigned int target;
+};
+
+struct dml2_pipe_combine_scratch {
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES];
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES];
+};
+
struct dml2_context {
enum dml2_architecture architecture;
struct dml2_configuration_options config;
struct dml2_helper_det_policy_scratch det_helper_scratch;
+ struct dml2_pipe_combine_scratch pipe_combine_scratch;
union {
struct {
struct display_mode_lib_st dml_core_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b36..a41812598ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -29,6 +29,7 @@
#include "dml2_translation_helper.h"
#define NUM_DCFCLK_STAS 5
+#define NUM_DCFCLK_STAS_NEW 8
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
@@ -228,17 +229,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
break;
case dml_project_dcn35:
+ case dml_project_dcn351:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
- case dml_project_dcn351:
- out->num_chans = 16;
- out->round_trip_ping_latency_dcfclk_cycles = 1100;
- out->smn_latency_us = 2;
- break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
@@ -253,12 +250,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
{
struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch;
struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params;
- unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS];
+ unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
+ unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0};
+ unsigned int dml_project = dml2->v20.dml_core_ctx.project;
+
unsigned int i = 0;
unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type
- p->dcfclk_stas_mhz = dcfclk_stas_mhz;
- p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ if (dml_project == dml_project_dcn351) {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz_new;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW;
+ } else {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ }
+
p->in_bbox = in_bbox;
p->out_states = out;
p->in_states = &dml2->v20.scratch.create_scratch.in_states;
@@ -436,8 +442,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
- dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
// Override last out_state with data from last in_state
// This will ensure that out_state contains max fclk
memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
@@ -824,13 +829,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
@@ -849,7 +866,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
@@ -1044,7 +1061,46 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
plane_index = 0;
}
}
-
+static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
+ unsigned int location, const struct dc_stream_state *in)
+{
+ if (in->num_wb_info > 0) {
+ for (int i = 0; i < __DML_NUM_DMB__; i++) {
+ const struct dc_writeback_info *wb_info = &in->writeback_info[i];
+ /* current DML limitation: only 1 DWB per stream is supported */
+ if (wb_info->wb_enabled) {
+ out->WritebackEnable[location] = wb_info->wb_enabled;
+ out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width;
+ out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width;
+ out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height;
+
+ out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+
+ out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ /* current design does not have chroma scaling; needs follow-up */
+ out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.h_taps : 1;
+ out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.v_taps : 1;
+
+ out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ }
+ }
+ }
+}
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1089,6 +1145,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
+ /* populate DML writeback config from the stream's writeback info */
+ populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
+ disp_cfg_stream_location, context->streams[i]);
+
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
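One detail worth calling out in the writeback hunk above: the H/V scaling ratios take their numerator from the cropped source when crop_en is set and from the full source otherwise, while the denominator is always the destination size. A one-line restatement of that rule (the helper itself is hypothetical):

    /* Hypothetical restatement of the crop-aware ratio computed above. */
    static double wb_ratio(bool crop_en, unsigned int crop,
    		       unsigned int src, unsigned int dest)
    {
    	return (double)(crop_en ? crop : src) / (double)dest;
    }

For example, a 3840-wide source cropped to 1920 and written back at 960 wide yields a ratio of 2.0, not 4.0.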
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 1068b962d1c1..0f8b3336e26d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -224,7 +224,7 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int
static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane,
unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
{
- int i, j;
+ unsigned int i, j;
bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
if (!plane_id)
@@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state
if (state->streams[i]->stream_id == stream_id) {
for (j = 0; j < state->stream_status[i].plane_count; j++) {
if (state->stream_status[i].plane_states[j] == plane &&
- (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+ (!is_plane_duplicate || (j == plane_index))) {
*plane_id = (i << 16) | j;
return true;
}
@@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
}
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]);
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
@@ -374,10 +376,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz
* 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz
* 1000;
+
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ }
}
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx)
@@ -396,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display
watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000;
}
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark)
+{
+ unsigned int time_per_byte = 0;
+ unsigned int total_free_entry = 0xb40;
+ unsigned int buf_lh_capability;
+ unsigned int max_scaled_time;
+
+ if (mode == PACKED_444) /* packed mode 32 bpp */
+ time_per_byte = time_per_pixel/4;
+ else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
+ time_per_byte = time_per_pixel/8;
+
+ if (time_per_byte == 0)
+ time_per_byte = 1;
+
+ buf_lh_capability = (total_free_entry * time_per_byte * 32) >> 6; /* time_per_byte is in u6.6 */
+ max_scaled_time = buf_lh_capability - urgent_watermark;
+ return max_scaled_time;
+}
+
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx)
+{
+ int i, j = 0;
+ struct mcif_arb_params *wb_arb_params = NULL;
+ struct dcn_bw_writeback *bw_writeback = NULL;
+ enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/
+
+ if (context->stream_count != 0) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->num_wb_info != 0)
+ j++;
+ }
+ }
+ if (j == 0) /* no DWB */
+ return;
+ for (i = 0; i < __DML_NUM_DMB__; i++) {
+ bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback;
+ wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i];
+
+ for (j = 0 ; j < 4; j++) {
+ /* current DML only has one set of watermarks; needs follow-up */
+ bw_writeback->mcif_wb_arb[i].cli_watermark[j] =
+ dml_get_wm_writeback_urgent(dml_core_ctx) * 1000;
+ bw_writeback->mcif_wb_arb[i].pstate_watermark[j] =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+ }
+ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) {
+ /* time_per_pixel should be in u6.6 format */
+ bw_writeback->mcif_wb_arb[i].time_per_pixel =
+ (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+ }
+ bw_writeback->mcif_wb_arb[i].slice_lines = 32;
+ bw_writeback->mcif_wb_arb[i].arbitration_slice = 2;
+ bw_writeback->mcif_wb_arb[i].max_scaled_time =
+ dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel,
+ wbif_mode, wb_arb_params->cli_watermark[0]);
+ /* not required any more */
+ bw_writeback->mcif_wb_arb[i].dram_speed_change_duration =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+ }
+}
void dml2_initialize_det_scratch(struct dml2_context *in_ctx)
{
int i;
@@ -468,6 +541,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
else
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
+
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx);
if (total_det_allocated > max_det_size) {
need_recalculation = true;
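To make the u6.6 fixed-point arithmetic in dml2_calc_max_scaled_time() concrete, here is a worked example; the 594 MHz pixel clock is an assumed input, the constants come from the hunk:

    unsigned int phy_pix_clk_khz = 594000;                           /* assumed input */
    unsigned int time_per_pixel  = (1000000 << 6) / phy_pix_clk_khz; /* 107 in u6.6 */
    unsigned int time_per_byte   = time_per_pixel / 8;               /* FP16, 64 bpp -> 13 */
    unsigned int capability      = (0xb40 * time_per_byte * 32) >> 6; /* 18720 */
    /* max_scaled_time = capability - urgent_watermark */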
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
index 5842d6d3c4b6..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
@@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig
unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled);
void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context);
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx);
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
bool dml2_is_stereo_timing(const struct dc_stream_state *stream);
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark);
/*
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb..9412d5384a41 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -570,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
struct dml2_dcn_clocks out_clks;
unsigned int result = 0;
bool need_recalculation = false;
+ uint32_t cstate_enter_plus_exit_z8_ns;
if (!context || context->stream_count == 0)
return true;
@@ -639,8 +640,17 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
+
+ cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+
+ if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000;
+
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
}
return result;
@@ -681,13 +691,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
{
bool out = false;
- if (!(context->bw_ctx.dml2))
+ if (!dml2)
return false;
- dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);
+ dml2_apply_debug_options(in_dc, dml2);
/* Use dml_validate_only for fast_validate path */
@@ -703,13 +713,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // Allocate Mode Lib Ctx
- *dml2 = dml2_allocate_memory();
-
- if (!(*dml2))
- return false;
// Store config options
(*dml2)->config = *config;
@@ -737,9 +742,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
@@ -779,3 +793,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
return true;
}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+ dml2_init(in_dc, config, dml2);
+}
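Since dml2_validate() now takes the dml2 context explicitly, callers that previously relied on it being fetched from context->bw_ctx need a one-line adjustment. A hypothetical call site (the wrapper name is a placeholder; the argument mirrors what existing callers pass):

    /* Hypothetical caller reflecting the new dml2_validate() signature. */
    static bool validate_with_dml2(const struct dc *dc, struct dc_state *context,
    			       bool fast_validate)
    {
    	return dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate);
    }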
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index ee0eb184eb6d..4a8bd2f4195e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -71,6 +71,7 @@ struct dml2_dcn_clocks {
struct dml2_dc_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
+ void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master);
bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context);
bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm);
bool (*update_pipes_for_stream_with_slice_count)(
@@ -86,8 +87,23 @@ struct dml2_dc_callbacks {
const struct dc_plane_state *plane,
int slice_count);
int (*get_odm_slice_index)(const struct pipe_ctx *opp_head);
+ int (*get_odm_slice_count)(const struct pipe_ctx *opp_head);
int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe);
+ int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe);
struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *(*get_otg_master_for_stream)(
+ struct resource_context *res_ctx,
+ const struct dc_stream_state *stream);
+ int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *opp_heads[MAX_PIPES]);
+ int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *dpp_pipes[MAX_PIPES]);
+ struct dc_stream_status *(*get_stream_status)(
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id);
};
struct dml2_dc_svp_callbacks {
@@ -96,10 +112,10 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
- struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
- enum dc_status (*add_phantom_stream)(struct dc *dc,
+ enum dc_status (*add_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
@@ -108,7 +124,7 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state *stream,
struct dc_plane_state *plane_state,
struct dc_state *context);
- enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ enum dc_status (*remove_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
void (*release_phantom_plane)(const struct dc *dc,
@@ -121,6 +137,15 @@ struct dml2_dc_svp_callbacks {
enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
+ bool (*remove_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ void (*release_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
};
struct dml2_clks_table_entry {
@@ -191,6 +216,8 @@ struct dml2_configuration_options {
unsigned int max_segments_per_hubp;
unsigned int det_segment_size;
bool map_dc_pipes_with_callbacks;
+
+ bool use_clock_dc_limits;
};
/*
@@ -214,6 +241,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
@@ -241,6 +271,7 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
*/
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
+ struct dml2_context *dml2,
bool fast_validate);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
new file mode 100644
index 000000000000..99bd36073561
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
@@ -0,0 +1,77 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'dpp' sub-component of DAL.
+#
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o
+
+AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10)
+
+###############################################################################
+
+DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o
+
+AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20)
+
+###############################################################################
+
+DPP_DCN201 = dcn201_dpp.o
+
+AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201)
+
+###############################################################################
+
+DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o
+
+AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30)
+
+###############################################################################
+
+DPP_DCN32 = dcn32_dpp.o
+
+AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32)
+
+###############################################################################
+
+DPP_DCN35 = dcn35_dpp.o
+
+AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35)
+
+###############################################################################
+
+endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
new file mode 100644
index 000000000000..1318c6fba3e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
@@ -0,0 +1,6 @@
+dal3_subdirectory_sources(
+ dcn10_dpp.c
+ dcn10_dpp_cm.c
+ dcn10_dpp_dscl.c
+ dcn10_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index ef52e6b6eccf..e1da48b05d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
@@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
- .dpp_program_3dlut = NULL
+ .dpp_program_3dlut = NULL,
+ .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap,
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c9e045666dcc..c48139bed11f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1090,7 +1090,8 @@
type DPP_CLOCK_ENABLE; \
type CM_HDR_MULT_COEF; \
type CUR0_FP_BIAS; \
- type CUR0_FP_SCALE;
+ type CUR0_FP_SCALE;\
+ type DISPCLK_R_GATE_DISABLE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
@@ -1521,4 +1522,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
const struct dcn_dpp_registers *tf_regs,
const struct dcn_dpp_shift *tf_shift,
const struct dcn_dpp_mask *tf_mask);
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
index 904c2d278998..006e23842016 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
@@ -28,9 +28,9 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
-#include "dcn10_cm_common.h"
+#include "dcn10/dcn10_cm_common.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
@@ -98,7 +98,7 @@ static void program_gamut_remap(
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
- CM_GAMUT_REMAP_MODE, 0);
+ CM_GAMUT_REMAP_MODE, 0);
return;
}
switch (select) {
@@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn10_dpp *dpp,
+ uint16_t *regval,
+ enum gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMB_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint16_t arr_reg_val[12] = {0};
+ enum gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
const uint16_t *regval)
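The new dpp_get_gamut_remap hook gives callers a read-back path for the gamut remap currently programmed in hardware. A hedged sketch of how it might be consumed (the dpp pointer and surrounding plumbing are illustrative):

    /* Hypothetical read-back through the new hook; 'dpp' is assumed to be a
     * valid struct dpp pointer obtained from the pipe context. */
    struct dpp_grph_csc_adjustment adjust = {0};

    if (dpp->funcs->dpp_get_gamut_remap) {
    	dpp->funcs->dpp_get_gamut_remap(dpp, &adjust);
    	if (adjust.gamut_adjust_type == GRAPHICS_GAMUT_ADJUST_TYPE_SW)
    		/* adjust.temperature_matrix now holds the 12 coefficients */
    		;
    }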
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
index 5ca9ab8a76e8..808bca9fb804 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
new file mode 100644
index 000000000000..9c2d7096348e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn20_dpp.c
+ dcn20_dpp_cm.c
+ dcn20_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
index eaa7032f0f1a..56ebd7164dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
@@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base,
REG_GET(DPP_CONTROL,
DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Degamma LUT (RAM)
REG_GET(CM_DGAM_CONTROL,
- CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
- // BGAM has no ROM, and definition is different, can't reuse same dump
- //REG_GET(CM_BLNDGAM_CONTROL,
- // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
- REG_GET(CM_GAMUT_REMAP_CONTROL,
- CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
- if (s->gamut_remap_mode) {
- s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
- s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
- s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
- s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
- s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
- s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
- }
+ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+ CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode);
}
void dpp2_power_on_obuf(
@@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn20_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index e735363d0051..49cb25c9cb36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -736,7 +736,7 @@ bool dpp20_program_shaper(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void dpp2_cnv_set_alpha_keyer(
struct dpp *dpp_base,
@@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
void dpp2_power_on_obuf(
struct dpp *dpp_base,
bool power_on);
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
index 598caa508d43..31613372e214 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
@@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn20_dpp *dpp,
+ uint16_t *regval,
+ enum dcn20_gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == DCN2_GAMUT_REMAP_COEF_B) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ uint16_t arr_reg_val[12] = {0};
+ enum dcn20_gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == DCN2_GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
void dpp2_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
@@ -1059,15 +1114,15 @@ static void dpp20_select_3dlut_ram_mask(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
new file mode 100644
index 000000000000..7711cd3c47a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn201_dpp.c
+ dcn201_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
index a7268027a472..345202fee40f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn201_dpp.h"
+#include "dcn201/dcn201_dpp.h"
#include "basics/conversion.h"
#define REG(reg)\
@@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn201_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
index cbd5b47b4acf..cbd5b47b4acf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
new file mode 100644
index 000000000000..0faee2a1e32b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn30_dpp.c
+ dcn30_dpp_cm.c
+ dcn30_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 11f7746f3a65..f8c0cee34080 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -44,12 +44,45 @@
void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t gamcor_lut_mode, rgam_lut_mode;
REG_GET(DPP_CONTROL,
- DPP_CLOCK_ENABLE, &s->is_enabled);
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Pre-degamma (ROM)
+ REG_GET_2(PRE_DEGAM,
+ PRE_DEGAM_MODE, &s->pre_dgam_mode,
+ PRE_DEGAM_SELECT, &s->pre_dgam_select);
+
+ // Gamma Correction (RAM)
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode);
+ if (s->gamcor_mode) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode);
+ if (!gamcor_lut_mode)
+ s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
- // TODO: Implement for DCN3
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
+ if (s->rgam_lut_mode) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
+ if (!rgam_lut_mode)
+ s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
}
+
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -260,9 +293,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
@@ -286,9 +321,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
@@ -1351,15 +1388,15 @@ static void dpp3_select_3dlut_ram_mask(
}
static bool dpp3_program_3dlut(struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
@@ -1462,6 +1499,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
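
The read_state additions above decode each color block from two fields: a MODE_CURRENT field that reports whether the block is active, and a SELECT_CURRENT field that reports which RAM bank drives it. A minimal standalone sketch of that decode; the helper and its parameter names are illustrative, not driver symbols:

#include <stdint.h>

enum lut_mode_ex { LUT_BYPASS_EX, LUT_RAM_A_EX, LUT_RAM_B_EX };

/* mode_current / select_current stand in for the CM_*_MODE_CURRENT
 * and CM_*_SELECT_CURRENT fields read by dpp30_read_state() above.
 */
static enum lut_mode_ex decode_lut_mode(uint32_t mode_current,
					uint32_t select_current)
{
	if (!mode_current)
		return LUT_BYPASS_EX;	/* block is bypassed */

	/* SELECT_CURRENT == 0 means RAM bank A drives the pipe */
	return select_current ? LUT_RAM_B_EX : LUT_RAM_A_EX;
}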
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index cea3208e4ab1..269f437c1633 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -132,6 +132,8 @@
SRI(CM_POST_CSC_B_C33_C34, CM, id), \
SRI(CM_MEM_PWR_CTRL, CM, id), \
SRI(CM_CONTROL, CM, id), \
+ SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI(CM_TEST_DEBUG_DATA, CM, id), \
SRI(FORMAT_CONTROL, CNVC_CFG, id), \
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -294,6 +296,7 @@
TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
+ TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -426,6 +429,7 @@
type CM_GAMCOR_LUT_DATA; \
type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \
type CM_GAMCOR_LUT_READ_COLOR_SEL; \
+ type CM_GAMCOR_LUT_READ_DBG; \
type CM_GAMCOR_LUT_HOST_SEL; \
type CM_GAMCOR_LUT_CONFIG_MODE; \
type CM_GAMCOR_LUT_STATUS; \
@@ -637,4 +641,6 @@ void dpp3_program_cm_dealpha(
struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending);
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN30_H__ */
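
The register list now includes CM_TEST_DEBUG_INDEX/CM_TEST_DEBUG_DATA, the usual indexed debug-bus pair: software writes a status index into INDEX and reads the selected word back from DATA. A hedged sketch of the access pattern; reg_write()/reg_read() are stand-ins for the driver's register helpers and the status index argument is hypothetical:

#include <stdint.h>

extern void reg_write(uint32_t reg, uint32_t val);	/* stand-in */
extern uint32_t reg_read(uint32_t reg);			/* stand-in */

static uint32_t cm_debug_bus_read(uint32_t index_reg, uint32_t data_reg,
				  uint32_t status_index)
{
	/* select which internal CM status word to expose */
	reg_write(index_reg, status_index);
	/* the selected word is mirrored onto the DATA register */
	return reg_read(data_reg);
}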
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 5f97a868ada3..82eca0e7b7d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -405,3 +405,57 @@ void dpp3_cm_set_gamut_remap(
program_gamut_remap(dpp, arr_reg_val, gamut_mode);
}
}
+
+static void read_gamut_remap(struct dcn3_dpp *dpp,
+ uint16_t *regval,
+ int *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ //current coefficient set in use
+ REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint16_t arr_reg_val[12] = {0};
+ int select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
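
dpp3_cm_get_gamut_remap() hands the twelve coefficient words to convert_hw_matrix() to recover struct fixed31_32 values. A sketch of the per-word step, assuming the registers hold the S2D13 layout (1 sign, 2 integer, 13 fractional bits, two's complement) used by convert_float_matrix() in basics/conversion.c on the programming path; the encoding details here are an assumption, not taken from this change:

#include <stdint.h>

/* Rescale an assumed two's-complement Q2.13 register word to the
 * Q31.32 raw value carried by struct fixed31_32 (the fraction widens
 * from 13 to 32 bits, i.e. multiply by 2^19).
 */
static int64_t q2_13_to_q31_32_raw(uint16_t word)
{
	int64_t v = (int16_t)word;	/* sign-extend the 16-bit field */

	return v * (int64_t)(1u << 19);
}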
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
new file mode 100644
index 000000000000..7743edc4599f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn32_dpp.c
+ dcn32_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index dcf12a0b031c..41679997b44d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn32_dpp.h"
+#include "dcn32/dcn32_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
@@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
index 572958d287eb..572958d287eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
new file mode 100644
index 000000000000..91df5db26435
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn35_dpp.c
+ dcn35_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
new file mode 100644
index 000000000000..e16274fee31d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35/dcn35_dpp.h"
+#include "reg_helper.h"
+
+#define REG(reg) dpp->tf_regs->reg
+
+#define CTX dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
+ ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
+
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (enable) {
+ if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL,
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 1,
+ DISPCLK_R_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 0,
+ DISPCLK_R_GATE_DISABLE, 0);
+ }
+}
+
+static struct dpp_funcs dcn35_dpp_funcs = {
+ .dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
+ .dpp_read_state = dpp30_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = NULL,
+ .dpp_set_pre_degam = dpp3_set_pre_degam,
+ .dpp_program_input_lut = NULL,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp3_cnv_setup,
+ .dpp_program_degamma_pwl = NULL,
+ .dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
+ .dpp_program_cm_bias = dpp3_program_cm_bias,
+
+ .dpp_program_blnd_lut = NULL, // BLNDGAM was removed from the DPP in DCN3.2
+ .dpp_program_shaper_lut = NULL, // CM SHAPER moved to the MPCC in DCN3.2 (programmable before or after BLND)
+ .dpp_program_3dlut = NULL, // CM 3DLUT moved to the MPCC in DCN3.2 (programmable before or after BLND)
+
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp3_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp35_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+};
+
+bool dpp35_construct(
+ struct dcn3_dpp *dpp, struct dc_context *ctx,
+ uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn35_dpp_shift *tf_shift,
+ const struct dcn35_dpp_mask *tf_mask)
+{
+ bool ret = dpp32_construct(dpp, ctx, inst, tf_regs,
+ (const struct dcn3_dpp_shift *)(tf_shift),
+ (const struct dcn3_dpp_mask *)(tf_mask));
+
+ dpp->base.funcs = &dcn35_dpp_funcs;
+ return ret;
+}
+
+void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
+{
+ REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
+}
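
For context, a sketch of how a resource file would instantiate this DPP, following the create-helper pattern of earlier DCN generations. dcn35_dpp_create(), the tf_regs/tf_shift/tf_mask tables and the hard-coded FGCG enable are assumptions for illustration; in practice the FGCG decision would come from a feature or debug flag:

static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst)
{
	struct dcn3_dpp *dpp = kzalloc(sizeof(*dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	if (dpp35_construct(dpp, ctx, inst,
			&tf_regs[inst], &tf_shift, &tf_mask)) {
		/* opt this DPP into fine-grain clock gating */
		dpp35_set_fgcg(dpp, true);
		return &dpp->base;
	}

	kfree(dpp);
	return NULL;
}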
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
index 09b84307cd9e..135872d88219 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
@@ -31,7 +31,9 @@
#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \
DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \
- TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh)
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh)
#define DPP_REG_FIELD_LIST_DCN35(type) \
struct { \
@@ -47,6 +49,11 @@ struct dcn35_dpp_mask {
DPP_REG_FIELD_LIST_DCN35(uint32_t);
};
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable);
+
bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx,
uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
const struct dcn35_dpp_shift *tf_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0df6c55eb326..150ef23440a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
kbps = apply_128b_132b_stream_overhead(timing, kbps);
+ if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
+ timing->vic == 0 && timing->hdmi_vic == 0 &&
+ timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
+ kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
+
return kbps;
}
@@ -453,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
- struct dc_dsc_config config;
+ struct dc_dsc_config config = {0};
struct dc_dsc_config_options options = {0};
options.dsc_min_slice_height_override = dsc_min_slice_height_override;
@@ -863,9 +868,9 @@ static bool setup_dsc_config(
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
- int max_slices_h;
- int min_slices_h;
- int num_slices_h;
+ int max_slices_h = 0;
+ int min_slices_h = 0;
+ int num_slices_h = 0;
int pic_width;
int slice_width;
int target_bpp;
@@ -1050,7 +1055,12 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- dsc_cfg->num_slices_v = pic_height/slice_height;
+ if (slice_height > 0) {
+ dsc_cfg->num_slices_v = pic_height / slice_height;
+ } else {
+ is_dsc_possible = false;
+ goto done;
+ }
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
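
The setup_dsc_config() hunk above replaces an unconditional pic_height/slice_height division with a validity check. Condensed into a standalone helper, with names local to this sketch:

#include <stdbool.h>

/* Derive the vertical slice count only for a valid slice height;
 * report failure instead of dividing by zero.
 */
static bool compute_num_slices_v(int pic_height, int slice_height,
				 int *num_slices_v)
{
	if (slice_height <= 0)
		return false;	/* DSC not possible for this timing */

	*num_slices_v = pic_height / slice_height;
	return true;
}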
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 36d6c1646a51..59864130cf83 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
{
int ret;
struct drm_dsc_config dsc_cfg;
- unsigned long long tmp;
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
@@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
- tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
- do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
- dsc_params->bytes_per_pixel = (uint32_t)tmp;
+ dsc_params->bytes_per_pixel =
+ (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)),
+ (uint32_t)dsc_cfg.slice_width)); /* Round-up */
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
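
The bytes_per_pixel computation keeps the value in fixed point with a 2^28 (0x10000000) scale, and the "+ (slice_width - 1)" term turns the integer division into a round-up. A standalone illustration with plain 64-bit division in place of div_u64(), which only matters inside the kernel:

#include <stdint.h>
#include <stdio.h>

static uint32_t bpp_fixed_u28(uint32_t slice_chunk_size, uint32_t slice_width)
{
	uint64_t tmp = (uint64_t)slice_chunk_size * 0x10000000ULL
		     + (slice_width - 1);

	return (uint32_t)(tmp / slice_width);	/* round-up division */
}

int main(void)
{
	/* e.g. 1440-byte chunks over a 1920-pixel-wide slice:
	 * 1440 / 1920 = 0.75 bytes per pixel, 0.75 * 2^28 = 0xc000000
	 */
	printf("0x%x\n", bpp_fixed_u28(1440, 1920));
	return 0;
}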
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index d734e3a134d1..2840ed5c57d8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -95,10 +95,6 @@ static bool offset_to_id(
return true;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
return false;
}
break;
@@ -184,11 +180,6 @@ static bool offset_to_id(
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): does not exist */
-#ifdef PALLADIUM_SUPPORTED
- *id = GPIO_ID_HPD;
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
ASSERT_CRITICAL(false);
return false;
}
@@ -308,10 +299,6 @@ static bool id_to_offset(
break;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
- result = true;
-#endif
result = false;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 3ede6e02c3a7..663c17f52779 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -128,7 +128,7 @@ struct gpio *dal_gpio_service_create_irq(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
@@ -144,7 +144,7 @@ struct gpio *dal_gpio_service_create_generic_mux(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
struct gpio *generic;
@@ -178,7 +178,7 @@ struct gpio_pin_info dal_gpio_get_generic_pin_info(
enum gpio_id id,
uint32_t en)
{
- struct gpio_pin_info pin;
+ struct gpio_pin_info pin = {0};
if (service->translate.funcs->id_to_offset) {
service->translate.funcs->id_to_offset(id, en, &pin);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 279020535af7..8f1a95b77830 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -110,6 +110,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
dal_hw_factory_dcn32_init(factory);
return true;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index d6b0a1af7d3e..37166b2b3fee 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -111,6 +111,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
dal_hw_translate_dcn32_init(translate);
return true;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 25ffc052d53b..99e17c164ce7 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dm_helpers.h"
#include "include/hdcp_msg_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 254136f8e3f9..cf8aa23b4415 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -180,6 +180,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
+HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o
+
+AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN351)
+
+###############################################################################
+
###############################################################################
endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 01493c49bd7a..0d3ea291eeee 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -249,7 +249,7 @@ static bool dce110_enable_display_power_gating(
return false;
}
-static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+static void dce110_prescale_params(struct ipp_prescale_params *prescale_params,
const struct dc_plane_state *plane_state)
{
prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
@@ -289,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (ipp == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- build_prescale_params(&prescale_params, plane_state);
+ dce110_prescale_params(&prescale_params, plane_state);
ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
- if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity &&
+ if (!plane_state->gamma_correction.is_identity &&
dce_use_lut(plane_state->format))
- ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+ ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction);
if (tf == NULL) {
/* Default case if no input transfer function specified */
@@ -614,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
xfm->funcs->opp_power_on_regamma_lut(xfm, true);
xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) {
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) {
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
- } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func,
+ } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func,
&xfm->regamma_params)) {
xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
@@ -1185,22 +1182,13 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- /* TODO: This looks like a bug to me as we are disabling HPO IO when
- * we are just disabling a single HPO stream. Shouldn't we disable HPO
- * HW control only when HPOs for all streams are disabled?
- */
- if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
- pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
- pipe_ctx->stream->ctx->dc->hwseq, false);
- }
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1291,6 +1279,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
+static void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ const struct dc_link *link = stream->link;
+ struct fixed31_32 link_bw_kbps;
+
+ dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings);
+ dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+ dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count;
+ dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate;
+
+ link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link,
+ &pipe_ctx->link_config.dp_link_settings));
+
+ /* For audio stream calculations, the video stream should not include FEC or SSC
+ * in order to get the most pessimistic values.
+ */
+ if (dp_link_info->encoding == DP_8b_10b_ENCODING &&
+ link->dc->link_srv->dp_is_fec_supported(link)) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100));
+ } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(10000, 9975)); /* 99.75% SSC overhead */
+ }
+
+ dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps);
+
+ /* HW minimum for 128b/132b HBlank is 4 frame symbols.
+ * TODO: Plumb the actual programmed HBlank min symbol width to here.
+ */
+ if (dp_link_info->encoding == DP_128b_132b_ENCODING)
+ dp_link_info->hblank_min_symbol_width = 4;
+ else
+ dp_link_info->hblank_min_symbol_width = 0;
+}
+
static void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
@@ -1338,6 +1366,15 @@ static void build_audio_output(
audio_output->crtc_info.calculated_pixel_clock_100Hz =
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
+ audio_output->crtc_info.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ audio_output->crtc_info.dsc_bits_per_pixel =
+ stream->timing.dsc_cfg.bits_per_pixel;
+
+ audio_output->crtc_info.dsc_num_slices =
+ stream->timing.dsc_cfg.num_slices_h;
+
/*for HDMI, audio ACR is with deep color ratio factor*/
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
audio_output->crtc_info.requested_pixel_clock_100Hz ==
@@ -1371,6 +1408,10 @@ static void build_audio_output(
audio_output->pll_info.ss_percentage =
pipe_ctx->pll_settings.ss_percentage;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info);
}
static void program_scaler(const struct dc *dc,
@@ -1496,7 +1537,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
}
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -1507,7 +1548,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
&audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
+ &pipe_ctx->stream->audio_info,
+ &audio_output.dp_link_info);
}
/* make sure no pipes syncd to the pipe being enabled */
@@ -2146,7 +2188,7 @@ static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
- int i;
+ unsigned int i;
/* program audio wall clock. use HDMI as clock source if HDMI
* audio active. Otherwise, use DP as clock source
@@ -2218,7 +2260,7 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2233,6 +2275,19 @@ static void dce110_setup_audio_dto(
}
}
+static bool dce110_is_hpo_enabled(struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i])
+ return true;
+ }
+
+ return false;
+}
+
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
@@ -2241,6 +2296,8 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
+ bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
+ bool is_hpo_enabled = dce110_is_hpo_enabled(context);
/* reset syncd pipes from disabled pipes */
if (dc->config.use_pipe_ctx_sync_logic)
@@ -2283,6 +2340,10 @@ enum dc_status dce110_apply_ctx_to_hw(
dce110_setup_audio_dto(dc, context);
+ if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled)
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i];
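
populate_audio_dp_link_info() scales the reported link bandwidth back up so the audio calculation sees the link without the FEC (8b/10b) or SSC (128b/132b) deductions, per the comment in that hunk. The two factors are isolated below; the FEC efficiency constant is the driver's DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100, whose exact value is not reproduced here, so it is passed as a parameter:

#include <stdint.h>

/* Back out an x100 efficiency factor: bw * 100 / efficiency_x100. */
static uint64_t undo_fec_efficiency(uint64_t link_bw_kbps,
				    uint32_t fec_efficiency_x100)
{
	return link_bw_kbps * 100 / fec_efficiency_x100;
}

/* Back out the 0.25% SSC margin on 128b/132b links. */
static uint64_t undo_ssc_margin(uint64_t link_bw_kbps)
{
	return link_bw_kbps * 10000 / 9975;
}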
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 6dd479e8a348..0c4aef8ffe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
DTN_INFO("\n");
}
-void dcn10_log_hw_state(struct dc *dc,
- struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
- DTN_INFO_BEGIN();
-
- dcn10_log_hubbub_state(dc, log_ctx);
-
- dcn10_log_hubp_states(dc, log_ctx);
-
- DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
- " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
- "C31 C32 C33 C34\n");
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
for (i = 0; i < pool->pipe_count; i++) {
struct dpp *dpp = pool->dpps[i];
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
- "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ DTN_INFO("[%2d]: %11xh %11s %9s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -329,19 +329,45 @@ void dcn10_log_hw_state(struct dc *dc,
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
- s.gamut_remap_mode,
- s.gamut_remap_c11_c12,
- s.gamut_remap_c13_c14,
- s.gamut_remap_c21_c22,
- s.gamut_remap_c23_c24,
- s.gamut_remap_c31_c32,
- s.gamut_remap_c33_c34);
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
DTN_INFO("\n");
}
DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc,
s.idle);
}
DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc, log_ctx);
+
+ dcn10_log_hubp_states(dc, log_ctx);
+
+ if (dc->hwss.log_color_state)
+ dc->hwss.log_color_state(dc, log_ctx);
+ else
+ dcn10_log_color_state(dc, log_ctx);
DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
@@ -1316,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -1397,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -1438,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
/* Power gate DSCs */
if (hws->funcs.dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1763,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- if (plane_state->gamma_correction &&
- !dpp_base->ctx->dc->debug.always_use_regamma
- && !plane_state->gamma_correction->is_identity
+ if (!dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction.is_identity
&& dce_use_lut(plane_state->format))
- dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
+ dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
if (tf == NULL)
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
@@ -1811,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
#define MAX_NUM_HW_POINTS 0x200
static void log_tf(struct dc_context *ctx,
- struct dc_transfer_func *tf, uint32_t hw_points_num)
+ const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
// DC_LOG_GAMMA is default logging of all hw points
// DC_LOG_ALL_GAMMA logs all points, not only hw points
@@ -1840,21 +1904,23 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ if (!stream)
+ return false;
+
if (dpp == NULL)
return false;
dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
@@ -1862,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream != NULL && stream->ctx != NULL &&
- stream->out_transfer_func != NULL) {
+ if (stream->ctx) {
log_tf(stream->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
}
@@ -2120,7 +2185,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
- unsigned int pclk;
+ unsigned int pclk = 0;
uint32_t embedded_pix_clk_100hz;
uint16_t embedded_h_total;
@@ -2211,7 +2276,7 @@ void dcn10_enable_vblanks_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height, master;
+ int i, width = 0, height = 0, master;
DC_LOGGER_INIT(dc_ctx->logger);
@@ -2277,7 +2342,7 @@ void dcn10_enable_timing_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height;
+ int i, width = 0, height = 0;
DC_LOGGER_INIT(dc_ctx->logger);
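
The new cleanup loop in dcn10_init_pipes() detaches an MPC tree whose bottom MPCC hangs off a timing generator that seamless boot left disabled. The detach condition restated as a standalone predicate; the helper is a sketch, not driver code:

#include <stdbool.h>

static bool mpc_tree_bot_is_stale(const bool *tg_enabled, int bot_id,
				  int max_mpcc, int max_pipes)
{
	/* bounds-check the bottom MPCC id before indexing tg_enabled[] */
	return bot_id < max_mpcc && bot_id < max_pipes &&
	       !tg_enabled[bot_id];
}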
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 931ac8ed7069..7d833fa6dd77 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -71,6 +71,112 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
+ " 3DLUT size RGAM mode GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ (s.dgam_lut_mode == 0) ? "Bypass" :
+ ((s.dgam_lut_mode == 1) ? "sRGB" :
+ ((s.dgam_lut_mode == 2) ? "Ycc" :
+ ((s.dgam_lut_mode == 3) ? "RAM" :
+ ((s.dgam_lut_mode == 4) ? "RAM" :
+ "Unknown")))),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 1) ? "RAM A" :
+ ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " OGAM mode\n");
+
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.rgam_mode == 1) ? "RAM A" :
+ ((s.rgam_mode == 2) ? "RAM B" :
+ "Bypass"));
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
static int find_free_gsl_group(const struct dc *dc)
{
if (dc->res_pool->gsl_groups.gsl_0 == 0)
@@ -297,7 +403,7 @@ void dcn20_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
@@ -767,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+
+ if (dccg->funcs->set_dtbclk_p_src)
+ dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
@@ -853,22 +975,6 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- if (dccg->funcs->set_dtbclk_p_src)
- dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
-
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- }
-
return DC_OK;
}
@@ -905,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
/*
* program OGAM only for the top pipe
* if there is a pipe split then fix diagnostic is required:
@@ -916,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (mpc->funcs->power_on_mpc_mem_pwr)
mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->top_pipe == NULL
- && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/*
* there is no ROM
*/
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
/*
@@ -944,17 +1050,15 @@ bool dcn20_set_blend_lut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -966,24 +1070,21 @@ bool dcn20_set_shaper_3dlut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *shaper_lut = NULL;
-
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- shaper_lut = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- shaper_lut = &dpp_base->shaper_params;
- }
+ const struct pwl_params *shaper_lut = NULL;
+
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ shaper_lut = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
}
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->state.bits.initialized == 1)
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
- &plane_state->lut3d_func->lut_3d);
+ &plane_state->lut3d_func.lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
@@ -1006,15 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc,
hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
hws->funcs.set_blend_lut(pipe_ctx, plane_state);
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
-
-
- if (tf == NULL) {
- dpp_base->funcs->dpp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_BYPASS);
- return true;
- }
+ tf = &plane_state->in_transfer_func;
if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
use_degamma_ram = true;
@@ -1392,6 +1485,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
return;
}
+ /* Detect ODM changes */
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1445,10 +1543,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1633,6 +1727,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@@ -1645,6 +1740,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@@ -1808,9 +1904,11 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
- if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
- dc->res_pool->hubbub->funcs->program_det_size(
- dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -1891,19 +1989,20 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -1958,7 +2057,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -1971,19 +2069,31 @@ void dcn20_program_front_end_for_ctx(
* turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
* DET allocation.
*/
- if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
@@ -2022,17 +2132,6 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
@@ -2344,7 +2443,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2355,7 +2454,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
@@ -2785,11 +2884,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- if (dc->hwseq->funcs.setup_hpo_hw_control)
- dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
- }
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
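
dcn20_wait_for_blank_complete() now polls dpg_is_pending() until the pending update clears and then reports the real dpg_is_blanked() state, rather than returning true unconditionally after the loop. The reworked wait in outline; the function-pointer parameters stand in for the opp->funcs hooks:

#include <stdbool.h>

static bool wait_for_blank(bool (*dpg_is_pending)(void),
			   bool (*dpg_is_blanked)(void))
{
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		if (!dpg_is_pending())
			/* pending cleared: report the actual blank state */
			return dpg_is_blanked();
		/* udelay(100) between polls in the driver */
	}

	return false;	/* timed out with an update still pending */
}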
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index d950b3e54ec2..5c874f7b0683 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -28,6 +28,8 @@
#include "hw_sequencer_private.h"
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
bool dcn20_set_shaper_3dlut(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323338..ef6488165b8f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
+ .log_color_state = dcn20_log_color_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d5769f38874f..6be846635a79 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -167,7 +167,7 @@ void dcn201_init_blank(
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 7252f5f781f0..804be977ea47 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -66,7 +66,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c
int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index c34c13e1e0a4..ed9141a67db3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -69,21 +69,168 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
+ " 3DLUT mode 3DLUT bit depth 3DLUT size RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ s.pre_dgam_mode,
+ (s.pre_dgam_select == 0) ? "sRGB" :
+ ((s.pre_dgam_select == 1) ? "Gamma 2.2" :
+ ((s.pre_dgam_select == 2) ? "Gamma 2.4" :
+ ((s.pre_dgam_select == 3) ? "Gamma 2.6" :
+ ((s.pre_dgam_select == 4) ? "BT.709" :
+ ((s.pre_dgam_select == 5) ? "PQ" :
+ ((s.pre_dgam_select == 6) ? "HLG" :
+ "Unknown")))))),
+ (s.gamcor_mode == 0) ? "Bypass" :
+ ((s.gamcor_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 0) ? "Bypass" :
+ ((s.rgam_lut_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " SHAPER mode 3DLUT mode 3DLUT bit-depth 3DLUT size OGAM mode OGAM LUT"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ mpc3_get_gamut_remap(pool->mpc, i, &s.gamut_remap);
+
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %11s %11s %16s %11s %10s %9s"
+ " %-12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_mode == 0) ? "Bypass" :
+ ((s.rgam_mode == 2) ? "RAM" :
+ "Unknown"),
+ (s.rgam_mode == 1) ? "B" : "A",
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
bool dcn30_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(
- plane_state->blend_tf, &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(
+ &plane_state->blend_tf, &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -151,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc,
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (dpp_base == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -229,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (pipe_ctx->top_pipe == NULL) {
/*program rmu shaper and 3dlut in MPC*/
ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -655,7 +799,7 @@ void dcn30_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -663,10 +807,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+ /* Wait for two frames to make sure AV mute is sent out */
+ if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
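A note on the AV-mute wait added above: wait_for_state() alternates between VACTIVE and VBLANK, and each VACTIVE-to-VBLANK pair marks one full frame, so the five waits span two frames and then land back in active video. The pattern, generalized as a sketch (not a driver helper; types as used in the hunk):

/* Sketch: block until 'frames' frame boundaries have passed, then
 * return inside active video. */
static void wait_frames(struct timing_generator *tg, int frames)
{
	int i;

	for (i = 0; i < frames; i++) {
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
	}
	tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
}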
@@ -731,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
uint32_t tmr_delay = 0, tmr_scale = 0;
- struct dc_cursor_attributes cursor_attr;
+ struct dc_cursor_attributes cursor_attr = {0};
bool cursor_cache_enable = false;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
@@ -787,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
plane->address.page_table_base.quad_part == 0 &&
dc->hwss.does_plane_fit_in_mall &&
- dc->hwss.does_plane_fit_in_mall(dc, plane,
+ dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch,
+ plane->plane_size.surface_size.height, plane->format,
cursor_cache_enable ? &cursor_attr : NULL)) {
unsigned int v_total = stream->adjust.v_total_max ?
stream->adjust.v_total_max : stream->timing.v_total;
@@ -917,11 +1072,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr)
{
// add meta size?
- unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int surface_size = pitch * height *
+ (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
unsigned int mall_size = dc->caps.mall_size_total;
unsigned int cursor_size = 0;
@@ -1015,21 +1174,3 @@ void dcn30_prepare_bandwidth(struct dc *dc,
if (!dc->clk_mgr->clks.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
}
-
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
-
- if (params->triggers.surface_update)
- triggers |= 0x100;
- if (params->triggers.cursor_update)
- triggers |= 0x8;
- if (params->triggers.force_trigger)
- triggers |= 0x1;
-
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
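The does_plane_fit_in_mall() refactor above decouples the check from dc_plane_state: pitch, height and pixel format alone determine the footprint. The same arithmetic with concrete numbers, assuming a hypothetical 32 MiB MALL and no cursor allocation:

#include <stdbool.h>

static bool fits_in_mall_example(void)
{
	unsigned int pitch = 3840, height = 2160;
	unsigned int bytes_pp = 4;	/* ARGB8888; ARGB16161616+ uses 8 */
	unsigned int mall_size = 32u * 1024 * 1024;

	/* 3840 * 2160 * 4 = 33,177,600 <= 33,554,432 -> fits */
	return pitch * height * bytes_pp <= mall_size;
}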
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index e557e2b98618..76b16839486a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup(
unsigned int num_dwb,
struct dc_writeback_info *wb_info);
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+
bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -68,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
@@ -90,7 +96,4 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-
#endif /* __DC_HWSS_DCN30_H__ */
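As an aside on dcn30_log_color_state() introduced above: the nested ternaries decode small register fields into names, and the same mapping can be kept flat with a bounds-checked string table. A sketch of that alternative, not the driver's code:

static const char *pre_dgam_name(unsigned int sel)
{
	static const char * const names[] = {
		"sRGB", "Gamma 2.2", "Gamma 2.4", "Gamma 2.6",
		"BT.709", "PQ", "HLG",
	};

	return sel < sizeof(names) / sizeof(names[0]) ? names[sel]
						      : "Unknown";
}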
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedffed..ef913445a795 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 7423880fabb6..1c8abb417b6e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc)
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
if (dc->res_pool->stream_enc[i]->vpg)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
}
}
@@ -275,7 +273,7 @@ void dcn31_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn31_dsc_pg_control(
@@ -481,7 +479,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
@@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
+
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
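dcn31_set_static_screen_control(), moved out of the DCN3.0 file above, folds the requested triggers into an OTG event mask whose bit layout is generation-specific: bit 8 flags surface updates here, while DCN3.5 uses bit 9 later in this diff. The packing, isolated as a sketch:

#include <stdbool.h>

static unsigned int pack_triggers_dcn31(bool surface, bool cursor,
					bool force)
{
	unsigned int triggers = 0;

	if (surface)
		triggers |= 0x100;	/* bit 8: surface update */
	if (cursor)
		triggers |= 0x8;	/* bit 3: cursor update */
	if (force)
		triggers |= 0x1;	/* bit 0: force trigger */
	return triggers;
}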
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
index edfc01d6ad73..b8bc939da155 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
@@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
+
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd064..c06cc2c5da92 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c07..0d8a05cf8b1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -105,7 +82,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e86a..542ce3b7f9e4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index aa36d7a56ca8..b8e884368dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -239,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
// Convert number of cache lines required to number of ways
if (dc->debug.force_mall_ss_num_ways > 0) {
num_ways = dc->debug.force_mall_ss_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
} else {
- num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
+ num_ways = 0;
}
return num_ways;
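With this hunk, converting a MALL allocation in bytes into cache ways becomes a per-ASIC resource-pool callback rather than a hard-coded call into the DCN3.2 helper; when no callback exists, zero ways are requested. What such a callback typically computes, sketched with hypothetical cache geometry:

/* Sketch: bytes -> cache ways, rounding up at each step. The 64-byte
 * line size and lines_per_way are illustrative, not DCN3.2 values. */
static unsigned int mall_bytes_to_ways(unsigned int bytes,
				       unsigned int lines_per_way)
{
	unsigned int cache_lines = (bytes + 63) / 64;

	return (cache_lines + lines_per_way - 1) / lines_per_way;
}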
@@ -261,7 +263,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
for (i = 0; i < dc->current_state->stream_count; i++) {
/* MALL SS messaging is not supported with PSR at this time */
if (dc->current_state->streams[i] != NULL &&
- dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off &&
+ dc->current_state->stream_status[i].plane_count > 0)))
return false;
}
@@ -475,39 +479,35 @@ bool dcn32_set_mcm_luts(
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = true;
- struct pwl_params *lut_params = NULL;
+ const struct pwl_params *lut_params = NULL;
// 1D LUT
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
}
result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
lut_params = NULL;
// Shaper
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
- cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
}
result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
- if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1)
- result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id);
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
+ result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
else
result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
@@ -524,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (mpc == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -562,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
/*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -956,39 +953,16 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
if (dc->ctx->dmub_srv->dmub->fw_version <
- DMUB_FW_VERSION(7, 0, 35)) {
+ DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1015,7 +989,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1103,10 +1077,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -1119,20 +1089,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1156,6 +1112,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+ * blank pattern is generated by OPP, reprogram blank pattern
+ * due to OPP count change
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1579,7 +1542,7 @@ void dcn32_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
uint32_t i;
/* program opp dpg blank color */
@@ -1778,3 +1741,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 069e20bc87c0..f55c11fc56ec 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index e8ac94a005b8..67d661dbd5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,14 +58,14 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 8b6c49622f3b..5295f52e4fc8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -349,7 +349,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
if (dc->res_pool->pg_cntl) {
@@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -396,7 +373,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -536,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
}
}
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
+{
+ if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ return;
+
+ if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
+ hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
+ hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
+ }
+}
+
void dcn35_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
@@ -679,22 +649,43 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
if (enable) {
- dc_get_edp_links(dc, edp_links, &edp_num);
- if (edp_num == 0 || edp_num > 1)
- return false;
+ uint32_t num_active_edp = 0;
+ int i;
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
+ struct dc_link *link = stream->link;
+ bool is_psr = link && !link->panel_config.psr.disable_psr &&
+ (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+
+ /* Ignore streams that are disabled. */
+ if (stream->dpms_off)
+ continue;
+
+ /* Active external displays block idle optimizations. */
+ if (!dc_is_embedded_signal(stream->signal))
+ return false;
- if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ /* If not on PWRSEQ0, idle optimizations can't be entered */
+ if (link && link->link_index != 0)
return false;
+
+ /* Check for panel power features required for idle optimizations. */
+ if (!is_psr && !is_replay)
+ return false;
+
+ num_active_edp += 1;
}
+
+ /* If more than one active eDP then disallow. */
+ if (num_active_edp > 1)
+ return false;
}
// TODO: review other cases when idle optimization is allowed
@@ -720,6 +711,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
struct hubbub *hubbub = dc->res_pool->hubbub;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -801,6 +793,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -842,6 +835,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
if (pg_cntl != NULL) {
if (pg_cntl->funcs->dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1002,6 +1009,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1019,8 +1029,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
- if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
- pipe_ctx->plane_res.mpcc_inst >= 0)
+ if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
if (pipe_ctx->stream_res.dsc)
@@ -1028,6 +1037,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
}
/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
@@ -1085,6 +1097,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (j == PG_OPTC && new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
} else if (cur_pipe->plane_state == new_pipe->plane_state ||
cur_pipe == new_pipe) {
@@ -1114,6 +1129,11 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM &&
+ cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
+ new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
}
}
@@ -1129,6 +1149,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
}
/**
@@ -1253,14 +1276,19 @@ void dcn35_root_clock_control(struct dc *dc,
if (!pg_cntl)
return;
/*enable root clock first when power up*/
- if (power_on)
+ if (power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
@@ -1273,14 +1301,19 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
/*disable root clock first when power down*/
- if (!power_on)
+ if (!power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
}
void dcn35_prepare_bandwidth(
@@ -1321,29 +1354,13 @@ void dcn35_optimize_bandwidth(
}
}
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->set_idle_state)
- dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle);
-}
-
-uint32_t dcn35_get_idle_state(const struct dc *dc)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->get_idle_state)
- return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
-
- return 0;
-}
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust)
{
int i = 0;
struct drr_params params = {0};
- // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
- unsigned int event_triggers = 0x800;
+ // DRR set trigger event mapped to OTG_TRIG_A
+ unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
// Note DRR trigger events are generated regardless of whether num frames met.
unsigned int num_frames = 2;
@@ -1377,3 +1394,48 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
}
}
}
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x200; /* bit 9: 10 0000 0000 */
+ if (params->triggers.cursor_update)
+ triggers |= 0x8; /* bit 3 */
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
+
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
+{
+ int i = 0;
+ struct long_vtotal_params params = {0};
+
+ params.vertical_total_max = v_total_max;
+ params.vertical_total_min = v_total_min;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (!pipe_ctx[i])
+ continue;
+
+ if (pipe_ctx[i]->stream) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+
+ if (timing)
+ params.vertical_blank_start = timing->v_total - timing->v_front_porch;
+ else
+ params.vertical_blank_start = 0;
+
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
+ }
+ }
+}
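dcn35_set_long_vblank() above anchors the blank start to the nominal timing, not to the stretched total. Worked through with common 1080p CEA numbers (illustrative only):

static unsigned int blank_start_example(void)
{
	/* 1080p CEA timing: v_total = 1125, v_front_porch = 4 */
	unsigned int v_total = 1125, v_front_porch = 4;

	/* blank start stays at the nominal boundary even while the OTG
	 * total is stretched toward v_total_max for the long vblank */
	return v_total - v_front_porch;	/* 1121 */
}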
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index fd66316e33de..a731c8880d60 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow
void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on);
+
void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable);
@@ -84,10 +86,13 @@ void dcn35_dsc_pg_control(
unsigned int dsc_inst,
bool power_on);
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
-uint32_t dcn35_get_idle_state(const struct dc *dc);
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust);
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a630aa77dcec..df3bf77f3fb4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
@@ -121,8 +121,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
+ .set_long_vtotal = dcn35_set_long_vblank,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -148,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
deleted file mode 100644
index 951ca2da4486..000000000000
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-dal3_subdirectory_sources(
- dcn351_init.c
- dcn351_init.h
-)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
index b24ad27fe6ef..a4b3c1e99ec6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -1,16 +1,27 @@
#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
+# Copyright (c) 2022-2024 Advanced Micro Devices, Inc.
#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
#
-# Authors: AMD
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
#
# Makefile for DCN351.
-DCN351 = dcn351_init.o
+DCN351 = dcn351_hwseq.o dcn351_init.o
AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
new file mode 100644
index 000000000000..93fe5b262a3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "resource.h"
+#include "dcn351_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#define DC_LOGGER_INIT(logger) \
+ struct dal_logger *dc_logger = logger
+
+#define DC_LOGGER \
+ dc_logger
+
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_gate(dc, context, update_state);
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ !update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+
+ break;
+ }
+ }
+}
+
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_ungate(dc, context, update_state);
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+
+ break;
+ }
+ }
+}
+
+/**
+ * dcn351_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ }
+ }
+
+ // domain25 currently always on.
+
+ /* this needs all clients to unregister their OPTC interrupts first; let DMUB FW handle it */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
+
+ // domain23 currently always on.
+ // domain22 currently always on.
+}
+
+/**
+ * dcn351_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 11, DCPG 19: dsc3
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ // domain22 currently always on.
+ // domain23 currently always on.
+
+ /* this needs all clients to unregister their OPTC interrupts first; let DMUB FW handle it */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ // domain25 currently always on.
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
+ }
+}
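dcn351_calc_blocks_to_gate() above scans from the highest pipe down and, at the first HUBP/DPP pair that must stay powered, forces every lower-index pair to stay powered as well: DCN3.5.1's ONO power regions nest, so pipe N can only be gated once every pipe above N is gated. A small standalone trace of that contiguity pass (a sketch, not driver code):

#include <stdbool.h>

/* gate[i] is true when pipe i may be power-gated; after the pass,
 * gating is only permitted as a contiguous run at the top. */
static void enforce_ono_contiguity(bool gate[], int pipe_count)
{
	int i, j;

	for (i = pipe_count - 1; i >= 0; i--) {
		if (!gate[i]) {		/* pipe i stays powered */
			for (j = i - 1; j >= 0; j--)
				gate[j] = false;
			break;
		}
	}
}

/* { true, false, true, true } (pipe 1 busy) becomes
 * { false, false, true, true }: only pipes 2 and 3 gate. */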
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
index 3341ef71009b..6d8f3bfb668e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,30 +24,18 @@
*
*/
-#include "core_types.h"
-#include "dcn35_dpp.h"
-#include "reg_helper.h"
+#ifndef __DC_HWSS_DCN351_H__
+#define __DC_HWSS_DCN351_H__
-#define REG(reg) dpp->tf_regs->reg
+#include "hw_sequencer_private.h"
-#define CTX dpp->base.ctx
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
-#undef FN
-#define FN(reg_name, field_name) \
- ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
- ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
-
-bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx,
- uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
- const struct dcn35_dpp_shift *tf_shift,
- const struct dcn35_dpp_mask *tf_mask)
-{
- return dpp32_construct(dpp, ctx, inst, tf_regs,
- (const struct dcn3_dpp_shift *)(tf_shift),
- (const struct dcn3_dpp_mask *)(tf_mask));
-}
-
-void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
-{
- REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
-}
+#endif /* __DC_HWSS_DCN351_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 143d3fc0221c..a53092cd619b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -32,6 +32,7 @@
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
#include "dcn35/dcn35_hwseq.h"
+#include "dcn351/dcn351_hwseq.h"
#include "dcn351_init.h"
@@ -67,9 +68,9 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
@@ -120,8 +121,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -147,6 +146,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 64ca7c66509b..7c339e7e7117 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -339,6 +339,8 @@ struct hw_sequencer_funcs {
/* HW State Logging Related */
void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+ void (*log_color_state)(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf,
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
@@ -375,7 +377,10 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
- bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+ bool (*does_plane_fit_in_mall)(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
@@ -422,11 +427,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
- void (*set_idle_state)(const struct dc *dc, bool allow_idle);
- uint32_t (*get_idle_state)(const struct dc *dc);
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
+ void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
};
void color_space_to_black_color(
@@ -476,9 +480,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status);
+ struct dc_stream_status *stream_status,
+ struct dc_state *context);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index b3c62a82cb1c..341219cf4144 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -120,6 +120,10 @@ struct hwseq_private_funcs {
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool clock_on);
+ void (*dpstream_root_clock_control)(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
void (*dpp_pg_control)(struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on);
@@ -155,7 +159,6 @@ struct hwseq_private_funcs {
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
@@ -170,7 +173,6 @@ struct hwseq_private_funcs {
struct dc_state *context,
struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
-#endif
void (*reset_back_end_for_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 3a6bf77a6873..028b2f971e36 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -90,6 +90,9 @@ struct resource_funcs {
void (*update_soc_for_wm_a)(
struct dc *dc, struct dc_state *context);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
/**
* @populate_dml_pipes - Populate pipe data struct
*
@@ -336,7 +339,9 @@ struct stream_resource {
};
struct plane_resource {
+ /* scl_data is scratch space required to program a plane */
struct scaler_data scl_data;
+ /* Below pointers to hw objects are required to enable the plane */
struct hubp *hubp;
struct mem_input *mi;
struct input_pixel_processor *ipp;
@@ -496,7 +501,7 @@ struct dcn_bw_writeback {
struct dcn_bw_output {
struct dc_clocks clk;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
unsigned int mall_ss_size_bytes;
@@ -515,6 +520,7 @@ struct bw_context {
union bw_output bw;
struct display_mode_lib dml;
struct dml2_context *dml2;
+ struct dml2_context *dml2_dc_power_source;
};
struct dc_dmub_cmd {
@@ -604,16 +610,7 @@ struct dc_state {
unsigned int stutter_period_us;
} perf_params;
- struct {
- /* used to temporarily backup plane states of a stream during
- * dc update. The reason is that plane states are overwritten
- * with surface updates in dc update. Once they are overwritten
- * current state is no longer valid. We want to temporarily
- * store current value in plane states so we can still recover
- * a valid current state during dc update.
- */
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
- } scratch;
+ enum dc_power_source_type power_source;
};
struct replay_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 9e4ddc985240..55529c5f471c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,7 +31,7 @@
#define __DCN_CALCS_H__
#include "bw_fixed.h"
-#include "../dml/display_mode_lib.h"
+#include "dml/display_mode_lib.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index 6ed1fb8c9300..b6203253111c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -43,7 +43,8 @@ struct audio_funcs {
void (*az_configure)(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void (*wall_dto_setup)(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 17e014d3bdc8..4f7480f60c85 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -281,8 +281,6 @@ struct clk_mgr_funcs {
void (*set_low_power_state)(struct clk_mgr *clk_mgr);
void (*exit_low_power_state)(struct clk_mgr *clk_mgr);
bool (*is_ips_supported)(struct clk_mgr *clk_mgr);
- void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle);
- uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr);
void (*init_clocks)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 6f4c97543c14..4ba18ea57aad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -349,13 +349,14 @@ struct clk_mgr_internal {
enum dm_pp_clocks_state cur_min_clks_state;
bool periodic_retraining_disabled;
- unsigned int cur_phyclk_req_table[MAX_PIPES * 2];
+ unsigned int cur_phyclk_req_table[MAX_LINKS];
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
bool dpm_present;
+ bool pme_trigger_pending;
};
struct clk_mgr_internal_funcs {
@@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz)
return (khz + 999) / 1000;
}
+static inline int khz_to_mhz_floor(int khz)
+{
+ return khz / 1000;
+}
+
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
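
The two helpers differ only in rounding direction: rounding down is the safe choice when a request must not exceed a granted limit, while rounding up guarantees a minimum. A quick illustration:

	int up   = khz_to_mhz_ceil(1234567);  /* (1234567 + 999) / 1000 = 1235 */
	int down = khz_to_mhz_floor(1234567); /*  1234567 / 1000        = 1234 */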
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index b9a06bf84cc9..d4c7885fc916 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,6 +59,7 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
+ enum streamclk_source clk_src;
uint64_t pixclk_hz;
uint64_t refclk_hz;
};
@@ -105,6 +106,10 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
+ void (*set_dpstreamclk_root_clock_gating)(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable);
void (*set_dpstreamclk)(
struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 901891316dfb..305fdc127bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -26,6 +26,12 @@
#ifndef __DAL_DCHUBBUB_H__
#define __DAL_DCHUBBUB_H__
+/**
+ * DOC: overview
+ *
+ * There is only one common DCHUBBUB. It contains the common request and return
+ * blocks for the Data Fabric Interface that are not clock/power gated.
+ */
enum dcc_control {
dcc_control__256_256_xxx,
@@ -154,7 +160,7 @@ struct hubbub_funcs {
bool (*program_watermarks)(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index f4aa76e02518..ca8de345d039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -27,6 +27,31 @@
#ifndef __DAL_DPP_H__
#define __DAL_DPP_H__
+/**
+ * DOC: overview
+ *
+ * The DPP (Display Pipe and Plane) block is the unified display data
+ * processing engine in DCN. It processes graphics or video data on a per-DPP
+ * rectangle basis. Such a rectangle can be part of an SLS (Single Large
+ * Surface), a layer to be blended with other DPPs, or a rectangle associated
+ * with a display tile.
+ *
+ * It provides various functions including:
+ * - graphic color keyer
+ * - graphic cursor compositing
+ * - graphic or video image source to destination scaling
+ * - image sharping
+ * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4
+ * - Color Space Conversion
+ * - Host LUT gamma adjustment
+ * - Color Gamut Remap
+ * - brightness and contrast adjustment.
+ *
+ * A DPP pipe consists of Converter and Cursor (CNVC), Scaler (DSCL), Color
+ * Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) modules
+ * connected in a video/graphics pipeline.
+ */
+
#include "transform.h"
#include "cursor_reg_cache.h"
@@ -141,6 +166,7 @@ struct dcn_dpp_state {
uint32_t igam_input_format;
uint32_t dgam_lut_mode;
uint32_t rgam_lut_mode;
+ // gamut_remap data for dcn10_get_cm_states()
uint32_t gamut_remap_mode;
uint32_t gamut_remap_c11_c12;
uint32_t gamut_remap_c13_c14;
@@ -148,6 +174,16 @@ struct dcn_dpp_state {
uint32_t gamut_remap_c23_c24;
uint32_t gamut_remap_c31_c32;
uint32_t gamut_remap_c33_c34;
+ // gamut_remap data for dcn*_log_color_state()
+ struct dpp_grph_csc_adjustment gamut_remap;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t blnd_lut_mode;
+ uint32_t pre_dgam_mode;
+ uint32_t pre_dgam_select;
+ uint32_t gamcor_mode;
};
struct CM_bias_params {
@@ -286,10 +322,13 @@ struct dpp_funcs {
const struct pwl_params *params);
bool (*dpp_program_3dlut)(
struct dpp *dpp,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
+
+ void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
};
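
The new hook pairs with the gamut_remap fields added to dcn_dpp_state above; a color-state logging path might use it as follows (sketch; the `dpp` pointer is assumed):

	struct dpp_grph_csc_adjustment adjust = {0};

	/* Snapshot the currently programmed gamut remap for logging */
	if (dpp->funcs->dpp_get_gamut_remap)
		dpp->funcs->dpp_get_gamut_remap(dpp, &adjust);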
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 729ca0064e94..063efc8128a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -147,9 +147,10 @@ struct dwb_caps {
unsigned int support_ogam :1;
unsigned int support_wbscl :1;
unsigned int support_ocsc :1;
- unsigned int support_stereo :1;
+ unsigned int support_stereo :1;
+ unsigned int support_4k_120p :1;
} caps;
- unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */
+ unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. */
};
struct dwbc {
@@ -166,8 +167,9 @@ struct dwbc {
bool dwb_is_drc;
int wb_src_plane_inst;/*hubp, mpcc, inst*/
uint32_t mask_id;
- int otg_inst;
- bool mvc_cfg;
+ int otg_inst;
+ bool mvc_cfg;
+ struct dc_dwb_params params;
};
struct dwbc_funcs {
@@ -192,6 +194,10 @@ struct dwbc_funcs {
struct dwbc *dwbc,
enum dwb_frame_capture_enable enable);
+ void (*dwb_set_scaler)(
+ struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
@@ -205,9 +211,11 @@ struct dwbc_funcs {
struct dwbc *dwbc,
struct dwb_warmup_params *warmup_params);
-
+ bool (*dwb_get_mcifbuf_line)(
+ struct dwbc *dwbc, unsigned int *buf_idx,
+ unsigned int *cur_line,
+ unsigned int *over_run);
#if defined(CONFIG_DRM_AMD_DC_FP)
-
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
enum dc_color_space color_space,
@@ -216,17 +224,17 @@ struct dwbc_funcs {
bool (*dwb_ogam_set_output_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-
+#endif
//TODO: merge with output_transfer_func?
bool (*dwb_ogam_set_input_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-#endif
+
+ void (*get_drr_time_stamp)(
+ struct dwbc *dwbc, uint32_t *time_stamp);
+
bool (*get_dwb_status)(
struct dwbc *dwbc);
- void (*dwb_set_scaler)(
- struct dwbc *dwbc,
- struct dc_dwb_params *params);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 7f3f9b69e903..72610cd7eae0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -26,13 +26,24 @@
#ifndef __DAL_HUBP_H__
#define __DAL_HUBP_H__
+/**
+ * DOC: overview
+ *
+ * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port
+ * (SDP) and DCN. This component has multiple features, such as memory
+ * arbitration, rotation, and cursor manipulation.
+ *
+ * There is one HUBP allocated per pipe. It fetches data and converts
+ * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved,
+ * fixed-depth streams of pixel data.
+ */
+
#include "mem_input.h"
#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
-
enum cursor_pitch {
CURSOR_PITCH_64_PIXELS = 0,
CURSOR_PITCH_128_PIXELS,
@@ -146,9 +157,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
-#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
void (*set_cursor_attributes)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index dcae23faeee3..c80ebb407add 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,10 +44,11 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
+#define MAX_LINKS (MAX_PIPES * 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
-#define MAX_HPO_DP2_LINK_ENCODERS 2
+#define MAX_HPO_DP2_LINK_ENCODERS 4
struct gamma_curve {
uint32_t offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index dbe7afa9d3a2..af9183f5d69b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -163,12 +163,11 @@ struct link_encoder_funcs {
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
+
void (*set_dio_phy_mux)(
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
- void (*set_dig_output_mode)(
- struct link_encoder *enc, uint8_t pix_per_container);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index b72fb314d804..86c12cd6f47d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -50,11 +50,13 @@ struct dcn_watermarks {
uint32_t usr_retraining_ns;
};
-struct dcn_watermark_set {
- struct dcn_watermarks a;
- struct dcn_watermarks b;
- struct dcn_watermarks c;
- struct dcn_watermarks d;
+union dcn_watermark_set {
+ struct {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+	}; /* legacy */
};
struct dce_watermarks {
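
Because the legacy a/b/c/d sets now live in an anonymous struct inside the union, existing call sites compile unchanged; a sketch:

	union dcn_watermark_set wm = {0};

	wm.a.urgent_ns = 4000;	/* legacy access path still works */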
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 61a2406dcc53..34a398f23fc6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -23,13 +23,28 @@
*/
/**
- * DOC: mpc-overview
+ * DOC: overview
*
- * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline
* that performs blending of multiple planes, using global and per-pixel alpha.
* It also performs post-blending color correction operations according to the
* hardware capabilities, such as color transformation matrix and gamma 1D and
* 3D LUT.
+ *
+ * MPC receives output from all DPP pipes and combines them into multiple
+ * outputs, supporting a flexible "M MPC inputs -> N MPC outputs" composition
+ * architecture. It features:
+ *
+ * - Programmable blending structure to allow software-controlled blending and
+ *   cascading;
+ * - Programmable window location of each DPP in the active region of the display;
+ * - Combining multiple DPP pipes in one active region when a single DPP pipe
+ *   cannot process a very large surface;
+ * - Combining multiple DPPs from different SLS with blending;
+ * - Stereo formats from a single DPP in top-bottom or side-by-side modes;
+ * - Stereo formats from two DPPs;
+ * - Alpha blending of multiple layers from different DPP pipes;
+ * - Programmable background color.
*/
#ifndef __DC_MPCC_H__
@@ -83,34 +98,65 @@ enum mpcc_alpha_blend_mode {
/**
* struct mpcc_blnd_cfg - MPCC blending configuration
- *
- * @black_color: background color
- * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
- * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
- * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
- * @global_gain: used when blend mode considers both pixel alpha and plane
- * alpha value and assumes the global alpha value.
- * @global_alpha: plane alpha value
- * @overlap_only: whether overlapping of different planes is allowed
- * @bottom_gain_mode: blend mode for bottom gain setting
- * @background_color_bpc: background color for bpc
- * @top_gain: top gain setting
- * @bottom_inside_gain: blend mode for bottom inside
- * @bottom_outside_gain: blend mode for bottom outside
*/
struct mpcc_blnd_cfg {
- struct tg_color black_color; /* background color */
- enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
- bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ /**
+ * @black_color: background color.
+ */
+ struct tg_color black_color;
+
+ /**
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE).
+ */
+ enum mpcc_alpha_blend_mode alpha_mode;
+
+ /**
+ * @pre_multiplied_alpha:
+ * Whether pixel color values were pre-multiplied by the alpha channel
+ * (MPCC_ALPHA_MULTIPLIED_MODE).
+ */
+ bool pre_multiplied_alpha;
+
+ /**
+ * @global_gain: Used when the blend mode considers both pixel alpha and plane alpha.
+ */
int global_gain;
+
+ /**
+ * @global_alpha: Plane alpha value.
+ */
int global_alpha;
+
+ /**
+ * @overlap_only: Whether overlapping of different planes is allowed.
+ */
bool overlap_only;
/* MPCC top/bottom gain settings */
+
+ /**
+ * @bottom_gain_mode: Blend mode for bottom gain setting.
+ */
int bottom_gain_mode;
+
+ /**
+ * @background_color_bpc: Background color for bpc.
+ */
int background_color_bpc;
+
+ /**
+ * @top_gain: Top gain setting.
+ */
int top_gain;
+
+ /**
+ * @bottom_inside_gain: Blend mode for bottom inside.
+ */
int bottom_inside_gain;
+
+ /**
+ * @bottom_outside_gain: Blend mode for bottom outside.
+ */
int bottom_outside_gain;
};
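
As an illustration, a common per-pixel-alpha setup fills the struct roughly like this (values are illustrative, not taken from any real call site):

	struct mpcc_blnd_cfg blnd_cfg = {0};

	blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	blnd_cfg.pre_multiplied_alpha = false;	/* framebuffer carries straight alpha */
	blnd_cfg.global_alpha = 0xff;		/* plane fully opaque */
	blnd_cfg.overlap_only = false;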
@@ -150,34 +196,58 @@ struct mpc_dwb_flow_control {
/**
* struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
- * @mpcc_id: MPCC physical instance
- * @dpp_id: DPP input to this MPCC
- * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
- * @blnd_cfg: the blending configuration for this MPCC
- * @sm_cfg: stereo mix setting for this MPCC
- * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
*
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
- int mpcc_id; /* MPCC physical instance */
- int dpp_id; /* DPP input to this MPCC */
- struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
- struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
- struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
- bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
+ /**
+ * @mpcc_id: MPCC physical instance.
+ */
+ int mpcc_id;
+
+ /**
+ * @dpp_id: DPP input to this MPCC.
+ */
+ int dpp_id;
+
+ /**
+ * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected.
+ */
+ struct mpcc *mpcc_bot;
+
+ /**
+ * @blnd_cfg: The blending configuration for this MPCC.
+ */
+ struct mpcc_blnd_cfg blnd_cfg;
+
+ /**
+ * @sm_cfg: Stereo mix setting for this MPCC.
+ */
+ struct mpcc_sm_cfg sm_cfg;
+
+ /**
+ * @shared_bottom:
+ *
+ * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
+ */
+ bool shared_bottom;
};
/**
* struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
*
- * @opp_id: the OPP instance that owns this MPC tree
- * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
 *
struct mpc_tree {
- int opp_id; /* The OPP instance that owns this MPC tree */
- struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
+ /**
+ * @opp_id: The OPP instance that owns this MPC tree.
+ */
+ int opp_id;
+
+ /**
+ * @opp_list: The top MPCC layer of the MPC tree that outputs to the OPP endpoint.
+ */
+ struct mpcc *opp_list;
};
struct mpc {
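
The tree is effectively a singly linked blending chain: opp_list is the top layer and each node's mpcc_bot points at the layer blended beneath it. A sketch of walking the chain:

	struct mpcc *layer;
	int depth = 0;

	/* Count layers from the OPP's top MPCC down to the bottom layer */
	for (layer = tree->opp_list; layer != NULL; layer = layer->mpcc_bot)
		depth++;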
@@ -199,6 +269,13 @@ struct mpcc_state {
uint32_t overlap_only;
uint32_t idle;
uint32_t busy;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t rgam_mode;
+ uint32_t rgam_lut;
+ struct mpc_grph_gamut_adjustment gamut_remap;
};
/**
@@ -217,16 +294,20 @@ struct mpc_funcs {
* Only used for planes that are part of blending chain for OPP output
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ *   Stereo mix must be disabled for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC.
+ * If NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane)(
struct mpc *mpc,
@@ -243,11 +324,14 @@ struct mpc_funcs {
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in/out] mpcc - MPCC to be removed from tree.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc)(
struct mpc *mpc,
@@ -260,9 +344,12 @@ struct mpc_funcs {
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
- * [in/out] mpc - MPC context.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ *
+ * Return:
+ *
+ * void
*/
void (*mpc_init)(struct mpc *mpc);
void (*mpc_init_single_inst)(
@@ -275,11 +362,14 @@ struct mpc_funcs {
* Update the blending configuration for a specified MPCC.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in] blnd_cfg - MPCC blending configuration.
- * [in] mpcc_id - The MPCC physical instance.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in] blnd_cfg - MPCC blending configuration.
+ * - [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return:
+ *
+ * void
*/
void (*update_blending)(
struct mpc *mpc,
@@ -289,15 +379,18 @@ struct mpc_funcs {
/**
* @cursor_lock:
*
- * Lock cursor updates for the specified OPP.
- * OPP defines the set of MPCC that are locked together for cursor.
+ * Lock cursor updates for the specified OPP. OPP defines the set of
+ * MPCC that are locked together for cursor.
*
* Parameters:
- * [in] mpc - MPC context.
- * [in] opp_id - The OPP to lock cursor updates on
- * [in] lock - lock/unlock the OPP
*
- * Return: void
+ * - [in] mpc - MPC context.
+ * - [in] opp_id - The OPP to lock cursor updates on
+ * - [in] lock - lock/unlock the OPP
+ *
+ * Return:
+ *
+ * void
*/
void (*cursor_lock)(
struct mpc *mpc,
@@ -307,20 +400,25 @@ struct mpc_funcs {
/**
* @insert_plane_to_secondary:
*
- * Add DPP into secondary MPC tree based on specified blending position.
- * Only used for planes that are part of blending chain for DWB output
+ * Add DPP into the secondary MPC tree based on the specified blending
+ * position. Only used for planes that are part of the blending chain for
+ * DWB output.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ *   Stereo mix must be disabled for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC. If
+ * NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane_to_secondary)(
struct mpc *mpc,
@@ -337,10 +435,14 @@ struct mpc_funcs {
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in] mpcc - MPCC to be removed from tree.
- * Return: void
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc_from_secondary)(
struct mpc *mpc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7617fabbd16e..d89c92370d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -23,6 +23,22 @@
*
*/
+/**
+ * DOC: overview
+ *
+ * The Output Plane Processor (OPP) block formats pixel streams so that they
+ * are suitable for display at the display device. The key functions contained
+ * in the OPP are:
+ *
+ * - Adaptive Backlight Modulation (ABM)
+ * - Formatter (FMT), which provides pixel-by-pixel operations to format the
+ *   incoming pixel stream.
+ * - Output Buffer, which provides pixel replication and overlapping.
+ * - Interface between MPC and OPTC.
+ * - Clock and reset generation.
+ * - CRC generation.
+ */
+
#ifndef __DAL_OPP_H__
#define __DAL_OPP_H__
@@ -321,6 +337,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 9a8bf6ec70ea..8d32e525f05a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -93,6 +93,8 @@ struct dcn_otg_state {
uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_double_buffer_control;
};
void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index a15efadb9183..75b9ec21f297 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -178,10 +178,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9a00a99317b2..cd68ecc242c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -64,6 +64,12 @@ struct drr_params {
bool immediate_flip;
};
+struct long_vtotal_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ uint32_t vertical_blank_start;
+};
+
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
@@ -182,9 +188,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
-#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
bool (*immediate_disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
@@ -333,6 +337,8 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
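
A sequencer implementing the new set_long_vtotal hook would fill the parameter block along these lines (sketch; the `tg` pointer and the timing values are assumed):

	struct long_vtotal_params params = {
		.vertical_total_min = 2250,
		.vertical_total_max = 2250,
		.vertical_blank_start = 2160,
	};

	if (tg->funcs->set_long_vtotal)
		tg->funcs->set_long_vtotal(tg, &params);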
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
new file mode 100644
index 000000000000..51da368f5c3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef __DC_VPG_H__
+#define __DC_VPG_H__
+
+struct dc_context;
+struct dc_info_packet;
+
+struct vpg;
+
+struct vpg_funcs {
+ void (*update_generic_info_packet)(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
+ void (*vpg_poweron)(
+ struct vpg *vpg);
+
+ void (*vpg_powerdown)(
+ struct vpg *vpg);
+};
+
+struct vpg {
+ const struct vpg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+#endif /* __DC_VPG_H__ */
\ No newline at end of file
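
A minimal sketch of driving the interface (the `vpg` instance is assumed to come from the stream encoder resources):

	struct dc_info_packet info_packet = {0};

	if (vpg->funcs->vpg_poweron)
		vpg->funcs->vpg_poweron(vpg);

	/* Queue generic packet slot 0 with an immediate update */
	vpg->funcs->update_generic_info_packet(vpg, 0, &info_packet, true);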
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 26fe81f213da..7ab8ba5e23ed 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -285,12 +285,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
- const bool is_alpm);
+ const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 77a60aa9f27b..361ad6b16b96 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -510,6 +510,17 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
/*
* Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in current resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
* pipe in any MPCC combine in current resource context.
* return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
* pipe idx of the free pipe
@@ -573,13 +584,6 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_FP)
-struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
- const struct resource_context *res_ctx,
- const struct resource_pool *pool,
- const struct dc_link *link);
-#endif
-
void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
struct dc_state *context);
@@ -615,4 +619,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
+
+/* Set up dc callbacks for dml2
+ * @dc: the display core structure
+ * @dml2_options: struct to hold callbacks
+ */
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
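
A sketch of the init-time wiring this helper enables (surrounding variable names are assumed):

	struct dml2_configuration_options dml2_opt = {0};

	/* Populate the DC-provided callbacks before handing options to DML2 */
	resource_init_common_dml2_callbacks(dc, &dml2_opt);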
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 076f667a82f6..2d4378780c1a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -170,4 +170,13 @@ IRQ_DCN35 = irq_service_dcn35.o
AMD_DAL_IRQ_DCN35= $(addprefix $(AMDDALPATH)/dc/irq/dcn35/,$(IRQ_DCN35))
-AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35)
\ No newline at end of file
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35)
+
+###############################################################################
+# DCN 351
+###############################################################################
+IRQ_DCN351 = irq_service_dcn351.o
+
+AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1c0d89e675da..bb576a9c5fdb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
info->ext_id);
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
- struct timing_generator *tg =
- dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ struct timing_generator *tg;
+
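+	/* Guard against an IRQ source that maps past the pipe_ctx array */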
+ if (pipe_offset >= MAX_PIPES)
+ return false;
+
+ tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index e8baafa02443..916f0c974637 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 03c5e8ff8cbd..42cdfe6c3538 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
new file mode 100644
index 000000000000..7ec8e0de2f01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "../dce110/irq_service_dce110.h"
+
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+
+#include "irq_service_dcn351.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+static enum dc_irq_source to_dal_irq_source_dcn351(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
+ return DC_IRQ_SOURCE_DMCUB_OUTBOX;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
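+	/* Flip the sense polarity so the opposite HPD transition raises the next interrupt */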
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+/* Compile-time expansion of the base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_DMUB(reg_name)\
+ BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\
+ REG_STRUCT[base + reg_num].enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[0] = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[1] = \
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
+ REG_STRUCT[base + reg_num].ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base + reg_num].ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\
+ REG_STRUCT[base].enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[0] = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[1] = \
+ ~reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
+ REG_STRUCT[base].ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base].ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+
+#define hpd_rx_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\
+
+#define pflip_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\
+
+#define vblank_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\
+
+#define vline0_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\
+
+#define dmub_outbox_int_entry()\
+ IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \
+ DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs
+
+#define dummy_irq_entry(irqno) \
+ REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\
+
+#define i2c_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num)
+
+#define dp_sink_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num)
+
+#define gpio_pad_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num)
+
+#define dc_underflow_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW)
+
+static struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+#define dcn351_irq_init_part_1() {\
+ dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \
+ hpd_int_entry(0); \
+ hpd_int_entry(1); \
+ hpd_int_entry(2); \
+ hpd_int_entry(3); \
+ hpd_int_entry(4); \
+ hpd_rx_int_entry(0); \
+ hpd_rx_int_entry(1); \
+ hpd_rx_int_entry(2); \
+ hpd_rx_int_entry(3); \
+ hpd_rx_int_entry(4); \
+ i2c_int_entry(1); \
+ i2c_int_entry(2); \
+ i2c_int_entry(3); \
+ i2c_int_entry(4); \
+ i2c_int_entry(5); \
+ i2c_int_entry(6); \
+ dp_sink_int_entry(1); \
+ dp_sink_int_entry(2); \
+ dp_sink_int_entry(3); \
+ dp_sink_int_entry(4); \
+ dp_sink_int_entry(5); \
+ dp_sink_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \
+ pflip_int_entry(0); \
+ pflip_int_entry(1); \
+ pflip_int_entry(2); \
+ pflip_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \
+ gpio_pad_int_entry(0); \
+ gpio_pad_int_entry(1); \
+ gpio_pad_int_entry(2); \
+ gpio_pad_int_entry(3); \
+ gpio_pad_int_entry(4); \
+ gpio_pad_int_entry(5); \
+ gpio_pad_int_entry(6); \
+ gpio_pad_int_entry(7); \
+ gpio_pad_int_entry(8); \
+ gpio_pad_int_entry(9); \
+ gpio_pad_int_entry(10); \
+ gpio_pad_int_entry(11); \
+ gpio_pad_int_entry(12); \
+ gpio_pad_int_entry(13); \
+ gpio_pad_int_entry(14); \
+ gpio_pad_int_entry(15); \
+ gpio_pad_int_entry(16); \
+ gpio_pad_int_entry(17); \
+ gpio_pad_int_entry(18); \
+ gpio_pad_int_entry(19); \
+ gpio_pad_int_entry(20); \
+ gpio_pad_int_entry(21); \
+ gpio_pad_int_entry(22); \
+ gpio_pad_int_entry(23); \
+ gpio_pad_int_entry(24); \
+ gpio_pad_int_entry(25); \
+ gpio_pad_int_entry(26); \
+ gpio_pad_int_entry(27); \
+ gpio_pad_int_entry(28); \
+ gpio_pad_int_entry(29); \
+ gpio_pad_int_entry(30); \
+ dc_underflow_int_entry(1); \
+ dc_underflow_int_entry(2); \
+ dc_underflow_int_entry(3); \
+ dc_underflow_int_entry(4); \
+ dc_underflow_int_entry(5); \
+ dc_underflow_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \
+ dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \
+}
+
+#define dcn351_irq_init_part_2() {\
+ vupdate_no_lock_int_entry(0); \
+ vupdate_no_lock_int_entry(1); \
+ vupdate_no_lock_int_entry(2); \
+ vupdate_no_lock_int_entry(3); \
+ vblank_int_entry(0); \
+ vblank_int_entry(1); \
+ vblank_int_entry(2); \
+ vblank_int_entry(3); \
+ vline0_int_entry(0); \
+ vline0_int_entry(1); \
+ vline0_int_entry(2); \
+ vline0_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \
+ dmub_outbox_int_entry(); \
+}
+
+#define dcn351_irq_init() {\
+ dcn351_irq_init_part_1(); \
+ dcn351_irq_init_part_2(); \
+}
+
+static struct irq_source_info irq_source_info_dcn351[DAL_IRQ_SOURCES_NUMBER] = {0};
+
+static struct irq_service_funcs irq_service_funcs_dcn351 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn351
+};
+
+static void dcn351_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ struct dc_context *ctx = init_data->ctx;
+
+#define REG_STRUCT irq_source_info_dcn351
+ dcn351_irq_init();
+
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn351;
+ irq_service->funcs = &irq_service_funcs_dcn351;
+}
+
+struct irq_service *dal_irq_service_dcn351_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn351_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h
new file mode 100644
index 000000000000..4094631ffec6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DAL_IRQ_SERVICE_DCN351_H__
+#define __DAL_IRQ_SERVICE_DCN351_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn351_create(
+ struct irq_service_init_data *init_data);
+
+#endif /* __DAL_IRQ_SERVICE_DCN351_H__ */
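
Creation follows the same pattern as earlier DCN generations (sketch; `dc_ctx` is assumed):

	struct irq_service_init_data init_data = { .ctx = dc_ctx };
	struct irq_service *irq_srv = dal_irq_service_dcn351_create(&init_data);

	if (!irq_srv)
		return;	/* allocation failed */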
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2d152b68a501..8d1a1cc94a8b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
}
}
-static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
-{
- return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_SQUARE_END);
-}
-
-static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
-{
- if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
- test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
- return true;
- else
- return false;
-}
-
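The two helpers deleted above are superseded by shared IS_DP_PHY_SQUARE_PATTERN()/IS_DP_PHY_PATTERN() macros used at the call sites below (retiring the "sqaure" typo along the way). A sketch of what those macros presumably expand to, reconstructed from the deleted bodies rather than quoted from the header that defines them:

#define IS_DP_PHY_SQUARE_PATTERN(tp) \
        (DP_TEST_PATTERN_SQUARE_BEGIN <= (tp) && \
         (tp) <= DP_TEST_PATTERN_SQUARE_END)

#define IS_DP_PHY_PATTERN(tp) \
        ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= (tp) && \
          (tp) <= DP_TEST_PATTERN_PHY_PATTERN_END) || \
         (tp) == DP_TEST_PATTERN_VIDEO_MODE)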
static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
@@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- if (is_dp_phy_sqaure_pattern(test_pattern)) {
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
core_link_read_dpcd(
link,
@@ -623,6 +607,8 @@ bool dp_set_test_pattern(
if (pipe_ctx == NULL)
return false;
+ link->pending_test_pattern = test_pattern;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@@ -643,12 +629,13 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
return true;
}
/* Check for PHY Test Patterns */
- if (is_dp_phy_pattern(test_pattern)) {
+ if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
@@ -681,6 +668,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@@ -756,7 +744,7 @@ bool dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- if (is_dp_phy_sqaure_pattern(test_pattern))
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern))
core_link_write_dpcd(link,
DP_LINK_SQUARE_PATTERN,
p_custom_pattern,
@@ -884,6 +872,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
}
return true;
@@ -895,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc,
{
int i;
struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
+ struct dc_stream_state *link_stream = NULL;
struct dc_link_settings store_settings = *link_setting;
link->preferred_link_setting = store_settings;
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index fbcd8fb58ea8..c8c55f196f8d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -24,7 +24,6 @@
*/
#include "link_dp_trace.h"
#include "link/protocols/link_dpcd.h"
-#include "link.h"
void dp_trace_init(struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index f4633d3cf9b9..a1f72fe378ee 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -22,6 +22,16 @@
* Authors: AMD
*
*/
+
+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO) is the display input and output unit in DCN. It
+ * includes output encoders to support different display outputs, like
+ * DisplayPort, HDMI, and DVI interfaces. It also includes the control
+ * and status channels for these interfaces.
+ */
+
#ifndef __LINK_HWSS_DIO_H__
#define __LINK_HWSS_DIO_H__
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index b659baa23147..348ea4cb832d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
if (tp_params == NULL)
return false;
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
+ if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
// Deprogram overrides from previous test pattern
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
- }
switch (tp_params->dp_phy_pattern) {
case DP_TEST_PATTERN_80BIT_CUSTOM:
if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
pltpat_custom, tp_params->custom_pattern_size) != 0)
return false;
+ hw_tp_params.custom_pattern = tp_params->custom_pattern;
+ hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size;
break;
case DP_TEST_PATTERN_D102:
break;
@@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index b621b97711b6..3e6c7be7e278 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
+ uint8_t clk_src = 0x4C;
+ uint8_t pattern = 0x4F; /* SQ128 */
+
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
- const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
- const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src};
+ const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src};
const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
- const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
- const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern};
+ const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern};
const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
@@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (tp_params == NULL)
return false;
- if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
- tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
+ if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) {
// Deprogram overrides from previously set square wave override
if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
link->current_test_pattern == DP_TEST_PATTERN_D102)
link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_exit_manual_automation_0[0],
sizeof(vendor_lttpr_exit_manual_automation_0));
- else
+ else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
return false;
@@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
-
return true;
}
@@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- link_res->hpo_dp_link_enc->funcs->set_ffe(
- link_res->hpo_dp_link_enc,
- link_settings,
- lane_settings[0].FFE_PRESET.raw);
-
- // FFE is programmed when retimer is programmed for SQ128, but explicit
- // programming needed here as well in case FFE-only update is requested
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ // Don't update our HW FFE when outputting phy test patterns
+ if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) {
+ // Directly program FIXED_VS retimer FFE for SQ128 override
+ if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) {
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ }
+ } else {
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
+ link_settings,
+ lane_settings[0].FFE_PRESET.raw);
+ }
}
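The hunk above is the main consumer of the new link->pending_test_pattern field set in link_dp_cts.c: while a PHY pattern request is in flight, the lane-settings path leaves the HPO encoder FFE alone and, for SQ128 patterns, programs the retimer FFE directly. A minimal standalone sketch of that handshake, assuming nothing beyond what the hunks show (driver types reduced to ints; illustration only, not driver code):

#include <stdio.h>

enum { TP_UNSUPPORTED, TP_VIDEO_MODE, TP_SQUARE, TP_PHY };

struct fake_link {
        int pending_test_pattern;
        int current_test_pattern;
};

static void set_lane_settings(struct fake_link *link)
{
        if (link->pending_test_pattern == TP_PHY ||
            link->pending_test_pattern == TP_SQUARE) {
                /* PHY pattern in flight: don't touch the encoder FFE */
                if (link->pending_test_pattern == TP_SQUARE)
                        printf("program retimer FFE for SQ128 override\n");
        } else {
                printf("program HPO encoder FFE\n");
        }
}

static void set_test_pattern(struct fake_link *link, int tp)
{
        link->pending_test_pattern = tp;        /* announce intent */
        set_lane_settings(link);                /* may run mid-sequence */
        link->current_test_pattern = tp;        /* commit */
        link->pending_test_pattern = TP_UNSUPPORTED;
}

int main(void)
{
        struct fake_link link = { TP_UNSUPPORTED, TP_VIDEO_MODE };

        set_test_pattern(&link, TP_SQUARE);     /* retimer path */
        set_test_pattern(&link, TP_VIDEO_MODE); /* encoder path */
        return 0;
}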
static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
@@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return requires_fixed_vs_pe_retimer_dio_link_hwss(link);
}
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 24153b0df503..0d523dc43d02 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -41,6 +41,7 @@
#include "protocols/link_dp_dpia.h"
#include "protocols/link_dp_phy.h"
#include "protocols/link_dp_training.h"
+#include "protocols/link_dp_dpia_bw.h"
#include "accessories/link_dp_trace.h"
#include "link_enc_cfg.h"
@@ -515,8 +516,8 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = {0};
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
@@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /*
+ * If this is a DP over USB4 link, then we need to:
+ * - Enable BW ALLOC support on DPtx if applicable
+ */
+ if (dc->config.usb4_bw_alloc_support) {
+ if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
+ /* update with non reduced link cap if bw allocation mode is supported */
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate =
+ link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count =
+ link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+ }
+ }
break;
}
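A small standalone illustration of the detection-time flow added above: once bandwidth-allocation mode is negotiated with the Connection Manager, the link advertises its non-reduced (nrd) caps instead of the DPIA-reduced ones (cap encodings below are made up):

#include <stdio.h>

struct fake_caps { int link_rate; int lane_count; };

int main(void)
{
        struct fake_caps reported = { 0x14, 2 }; /* DPIA-reduced caps */
        struct fake_caps nrd      = { 0x1e, 4 }; /* non-reduced caps  */
        int bw_alloc_enabled = 1;                /* CM handshake ok   */

        if (bw_alloc_enabled && nrd.link_rate && nrd.lane_count)
                reported = nrd;                  /* advertise full cap */

        printf("rate=0x%x lanes=%d\n", reported.link_rate, reported.lane_count);
        return 0;
}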
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 3cbfbf8d107e..b53ad18dbfbc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -55,6 +55,8 @@
#include "dccg.h"
#include "clk_mgr.h"
#include "atomfirmware.h"
+#include "vpg.h"
+
#define DC_LOGGER \
dc_logger
#define DC_LOGGER_INIT(logger) \
@@ -67,7 +69,6 @@
#define RETIMER_REDRIVER_INFO(...) \
DC_LOG_RETIMER_REDRIVER( \
__VA_ARGS__)
-#include "dc/dcn30/dcn30_vpg.h"
#define MAX_MTP_SLOT_COUNT 64
#define LINK_TRAINING_ATTEMPTS 4
@@ -127,7 +128,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
link->link_enc->funcs->get_dig_frontend &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
- unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (fe != ENGINE_ID_UNKNOWN)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
@@ -725,7 +726,7 @@ static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
static void enable_mst_on_sink(struct dc_link *link, bool enable)
{
- unsigned char mstmCntl;
+ unsigned char mstmCntl = 0;
core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
if (enable)
@@ -803,7 +804,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1575,7 +1576,7 @@ static bool write_128b_132b_sst_payload_allocation_table(
break;
}
} else {
- union dpcd_rev dpcdRev;
+ union dpcd_rev dpcdRev = {0};
if (core_link_read_dpcd(
link,
@@ -2119,7 +2120,7 @@ static enum dc_status enable_link_dp_mst(
struct pipe_ctx *pipe_ctx)
{
struct dc_link *link = pipe_ctx->stream->link;
- unsigned char mstm_cntl;
+ unsigned char mstm_cntl = 0;
/* sink signal type after MST branch is MST. Multiple MST sinks
* share one link. Link DP PHY is enable or training only once.
@@ -2197,6 +2198,64 @@ static enum dc_status enable_link(
static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
{
+ struct dc_link *link = stream->sink->link;
+ int req_bw = bw;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dpia_bw_alloc_config.bw_alloc_enabled)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int sink_index = 0;
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+
+ if (stream->sink->sink_id != link->remote_sinks[i]->sink_id)
+ req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i];
+ else
+ sink_index = i;
+ }
+
+ link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
+ }
+
+ /* get dp overhead for dp tunneling */
+ link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ req_bw += link->dpia_bw_alloc_config.dp_overhead;
+
+ if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
+ if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
+ DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ } else {
+ // Cannot get the required bandwidth.
+ DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ return false;
+ }
+ } else {
+ DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
+ return false;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+ DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__,
+ (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]),
+ link->dpia_bw_alloc_config.remote_sink_req_bw[i]);
+ }
+ }
+
return true;
}
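A worked example of the MST aggregation above, with made-up bandwidth numbers: the request sent to the Connection Manager is this stream's bandwidth, plus the bandwidth already recorded for the other remote sinks on the same DPIA link, plus the DP tunneling overhead:

#include <stdio.h>

int main(void)
{
        int remote_sink_req_bw[2] = { 8100, 0 }; /* sink 0 already active */
        int new_stream_bw = 5400;                /* this request, sink 1  */
        int dp_overhead = 534;                   /* tunneling overhead    */
        int req_bw = new_stream_bw;

        /* add bandwidth recorded for the *other* sinks on the link */
        req_bw += remote_sink_req_bw[0];
        remote_sink_req_bw[1] = new_stream_bw;   /* remember for next time */

        req_bw += dp_overhead;
        printf("request to CM: %d\n", req_bw);   /* 8100 + 5400 + 534 = 14034 */
        return 0;
}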
@@ -2227,6 +2286,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2253,6 +2313,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
+
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 5b0bc7f6a188..1aed55b0ab6a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
struct dc_crtc_timing outputTiming = *timing;
-#if defined(CONFIG_DRM_AMD_DC_FP)
if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
-#endif
if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 289f5d133342..a01d0842bf8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -992,7 +992,7 @@ enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link
static void read_dp_device_vendor_id(struct dc_link *link)
{
- struct dp_device_vendor_id dp_id;
+ struct dp_device_vendor_id dp_id = {0};
/* read IEEE branch device id */
core_link_read_dpcd(
@@ -1087,7 +1087,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
+ uint8_t det_caps[16] = {0}; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -1172,7 +1172,7 @@ static void get_active_converter_info(
set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision = {0};
core_link_read_dpcd(
link,
@@ -1242,7 +1242,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
{
- uint8_t dpcd_data[16];
+ uint8_t dpcd_data[16] = {0};
uint32_t read_dpcd_retry_cnt = 3;
enum dc_status status = DC_ERROR_UNEXPECTED;
union dp_downstream_port_present ds_port = { 0 };
@@ -1408,7 +1408,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
static void retrieve_cable_id(struct dc_link *link)
{
- union dp_cable_id usbc_cable_id;
+ union dp_cable_id usbc_cable_id = {0};
link->dpcd_caps.cable_id.raw = 0;
core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
@@ -1475,7 +1475,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
+ uint8_t lttpr_dpcd_data[8] = {0};
enum dc_status status;
bool is_lttpr_present;
@@ -1931,8 +1931,8 @@ void detect_edp_sink_caps(struct dc_link *link)
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
- uint8_t backlight_adj_cap;
- uint8_t general_edp_cap;
+ uint8_t backlight_adj_cap = 0;
+ uint8_t general_edp_cap = 0;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
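Many of the "= {0}" changes in this file (and in the IRQ-handler and training files below) share one rationale: buffers handed to core_link_read_dpcd() are now zero-initialized, so a failed AUX read leaves deterministic values instead of stack garbage. A standalone illustration of the hazard, using a stand-in read function that fails without touching the buffer:

#include <stdbool.h>
#include <stdio.h>

static bool fake_dpcd_read(unsigned char *buf, int len)
{
        (void)buf;
        (void)len;
        return false; /* AUX failure: buf is left untouched */
}

int main(void)
{
        unsigned char caps_uninit[16];       /* indeterminate on failure */
        unsigned char caps_zeroed[16] = {0}; /* deterministic on failure */

        if (!fake_dpcd_read(caps_zeroed, sizeof(caps_zeroed)))
                printf("read failed, caps_zeroed[0]=%u (always 0)\n",
                       caps_zeroed[0]);
        (void)caps_uninit; /* reading this instead would be indeterminate */
        return 0;
}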
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 5491b707cec8..0f1c411523a2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -166,7 +166,7 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
uint8_t idx = 0xFF;
int i;
- for (i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_LINKS; ++i) {
if (!dc_struct->links[i] ||
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
@@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
+ for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+ if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
ret = true;
init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+
+ /*
+ * During DP tunnel creation, the CM preallocates BW and reduces the
+ * estimated BW of the other DPIAs. The CM releases the preallocation only
+ * when an allocation completes, so do a zero allocation here to make the CM
+ * release the preallocation and update the estimated BW correctly for all
+ * DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ba69874be5a4..0fcf0b8530ac 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -120,7 +120,7 @@ bool dp_parse_link_loss_status(
static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
- union dpcd_psr_configuration psr_configuration;
+ union dpcd_psr_configuration psr_configuration = {0};
if (!link->psr_settings.psr_feature_enabled)
return false;
@@ -186,9 +186,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
static void handle_hpd_irq_replay_sink(struct dc_link *link)
{
- union dpcd_replay_configuration replay_configuration;
+ union dpcd_replay_configuration replay_configuration = {0};
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
- union psr_error_status replay_error_status;
+ union psr_error_status replay_error_status = {0};
if (!link->replay_settings.replay_feature_enabled)
return;
@@ -280,7 +280,7 @@ void dp_handle_link_loss(struct dc_link *link)
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
- union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
retval = core_link_read_dpcd(
link,
@@ -320,7 +320,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0};
retval = core_link_read_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 0050e0a06cbc..2fa4e64e2430 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -37,6 +37,7 @@
#include "clk_mgr.h"
#include "resource.h"
#include "link_enc_cfg.h"
+#include "atomfirmware.h"
#define DC_LOGGER \
link->ctx->logger
@@ -100,8 +101,11 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ // Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
- !is_immediate_downstream(link, offset))
+ !is_immediate_downstream(link, offset) &&
+ (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
if (link_hwss->ext.set_dp_lane_settings)
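A standalone sketch of the early-return decision above, with booleans standing in for the driver queries: HW lane settings are still skipped for non-transparent LTTPR hops that are not the immediate downstream device, except on FIXED_VS retimer links running 128b/132b, which after this change are always programmed:

#include <stdbool.h>
#include <stdio.h>

static bool skip_hw_lane_settings(bool non_transparent, bool immediate_downstream,
                                  bool fixed_vs, bool is_8b_10b)
{
        return non_transparent && !immediate_downstream &&
               (!fixed_vs || is_8b_10b);
}

int main(void)
{
        /* FIXED_VS + 128b/132b: no longer skipped after this patch */
        printf("%d\n", skip_hw_lane_settings(true, false, true, false)); /* 0 */
        /* ordinary non-transparent hop on 8b/10b: still skipped */
        printf("%d\n", skip_hw_lane_settings(true, false, false, true)); /* 1 */
        return 0;
}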
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 16a62e018712..1818970b8eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1071,7 +1071,7 @@ enum dc_status dpcd_set_link_settings(
* MUX chip gets link rate set back before link training.
*/
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- uint8_t supported_link_rates[16];
+ uint8_t supported_link_rates[16] = {0};
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
@@ -1508,10 +1508,7 @@ enum link_training_result dp_perform_link_training(
* Non-LT AUX transactions inside training mode.
*/
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
- if (link->dc->config.use_old_fixed_vs_sequence)
- status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings);
- else
- status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
else if (encoding == DP_128b_132b_ENCODING)
@@ -1590,21 +1587,7 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr) {
- /* ASSR is bound to fail with unsigned PSP
- * verstage used during devlopment phase.
- * Report and continue with eDP panel mode to
- * perform eDP link training with right settings
- */
- bool result;
- result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
- if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode, true);
dp_set_panel_mode(link, panel_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 5d36bab0029c..edb21d21952a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
@@ -617,7 +617,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
uint32_t retries_eq = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
enum dc_dp_training_pattern tr_pattern;
uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 7087cdc9e977..b5cf75975fff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
return status;
}
-
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = 0;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
- const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
- const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
- const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
- const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
- const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
- uint8_t toggle_rate;
- uint8_t rate;
-
- /* Only 8b/10b is supported */
- ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
- return status;
- }
-
- if (offset != 0xFF) {
- if (offset == 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
-
- /* Certain display and cable configuration require extra delay */
- } else if (offset > 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
- }
- }
-
- /* Vendor specific: Reset lane settings */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* Vendor specific: Enable intercept */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
-
-
- /* 1. set link rate, lane count and spread. */
-
- downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = rate;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_dpmf[0],
- sizeof(vendor_lttpr_write_data_dpmf));
-
- if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
- }
-
- /* 2. Perform link training */
-
- /* Perform Clock Recovery Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- const uint8_t max_vendor_dpcd_retries = 10;
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- uint8_t i = 0;
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings */
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- 0);
-
- /* 2. update DPCD of the receiver */
- if (!retry_count) {
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration.
- */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- 0);
- /* Vendor specific: Disable intercept */
- for (i = 0; i < max_vendor_dpcd_retries; i++) {
- if (pre_disable_intercept_delay_ms != 0)
- msleep(pre_disable_intercept_delay_ms);
- if (link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis)))
- break;
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
- }
- } else {
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- dpcd_set_lane_settings(
- link,
- lt_settings,
- 0);
- }
-
- /* 3. wait receiver to lock-on*/
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 6. max VS reached*/
- if (dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings */
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient
- */
- if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- status = dp_get_cr_failure(lane_count, dpcd_lane_status);
- }
-
- /* Perform Channel EQ Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
- status = LINK_TRAINING_EQ_FAIL_EQ;
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, 0);
- else
- dpcd_set_lane_settings(link, lt_settings, 0);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_EQ_FAIL_CR;
- break;
- }
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
- }
-
- return status;
-}
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
- link->vendor_specific_lttpr_link_rate_wa = rate;
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+ }
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
index c0d6ea329504..e61970e27661 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
@@ -28,11 +28,6 @@
#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
#include "link_dp_training.h"
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings);
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index fc50931c2aec..a72c898b64fa 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3
* XXX: Do not allow any two address ranges in this array to overlap
*/
static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
- { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};
+ { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }};
/*
* extend addresses to read all mandatory blocks together
@@ -164,7 +164,7 @@ static void dpcd_extend_address_range(
if (new_addr_range.start != in_address || new_addr_range.end != end_address) {
*out_address = new_addr_range.start;
*out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
- *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL);
+ *out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL);
}
}
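kcalloc() differs from the open-coded kzalloc(*out_size * sizeof(**out_data), ...) in that it fails cleanly when the multiplication overflows, instead of silently wrapping and under-allocating. A userspace illustration of the same contract with the analogous malloc/calloc pair:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t n = SIZE_MAX / 2 + 2, size = 2;

        void *a = malloc(n * size); /* n * size wraps to 2: under-allocated */
        void *b = calloc(n, size);  /* overflow detected: returns NULL */

        printf("malloc(wrapped)=%p calloc=%p\n", a, b);
        free(a);
        free(b);
        return 0;
}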
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 046d3e205415..ad9aca790dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -38,6 +38,7 @@
#include "dc/dc_dmub_srv.h"
#include "dce/dmub_replay.h"
#include "abm.h"
+#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
#define DC_LOGGER_INIT(logger)
@@ -287,7 +288,7 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if < 1 nits or > 5000, it might be wrong readback
+ // if > 5000 nits, it might be a wrong readback; 0 nits is a valid default value for an OLED panel.
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
@@ -320,8 +321,8 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
struct dc_link_settings link_setting;
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t req_bw;
union lane_count_set lane_count_set = {0};
@@ -892,7 +893,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
@@ -1033,7 +1035,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1054,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
}
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm)
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1063,8 +1065,11 @@ bool edp_replay_residency(const struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
+ if (!residency)
+ return false;
+
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm);
+ replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode);
else
*residency = 0;
@@ -1072,7 +1077,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1144,3 +1149,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+
+static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
+ struct link_resource *link_res, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ bool use_hpo_dp_link_enc = false;
+ uint8_t link_enc_index = 0;
+ uint8_t phy_type = 0;
+ uint8_t phy_id = 0;
+
+ if (!pDC->config.use_assr_psp_message)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ if (link_res->hpo_dp_link_enc) {
+ link_enc_index = link_res->hpo_dp_link_enc->inst;
+ use_hpo_dp_link_enc = true;
+ }
+
+ if (enable)
+ phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 1 : 0);
+
+ phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter);
+
+ cmd.assr_enable.header.type = DMUB_CMD__PSP;
+ cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
+ cmd.assr_enable.assr_data.enable = enable;
+ cmd.assr_enable.assr_data.phy_port_type = phy_type;
+ cmd.assr_enable.assr_data.phy_port_id = phy_id;
+ cmd.assr_enable.assr_data.link_enc_index = link_enc_index;
+ cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc;
+
+ dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable)
+{
+ struct link_resource *link_res = &pipe_ctx->link_res;
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+
+ if (*panel_mode != DP_PANEL_MODE_EDP)
+ return;
+
+ if (link->dc->config.use_assr_psp_message) {
+ edp_set_assr_enable(link->dc, link, link_res, enable);
+ } else if (cp_psp && cp_psp->funcs.enable_assr && enable) {
+ /* ASSR is bound to fail with unsigned PSP
+ * verstage used during the development phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with the right settings
+ */
+ bool result;
+
+ result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+
+ if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
+ *panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+}
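A hedged usage sketch tying together the two call sites added elsewhere in this patch: link training enables panel ASSR just before dp_set_panel_mode(), and link_set_dpms_off() disables it on teardown. The wrapper below is illustrative only, not driver code:

void example_assr_bracket(struct dc_link *link, struct pipe_ctx *pipe_ctx)
{
        enum dp_panel_mode panel_mode = dp_get_panel_mode(link);

        edp_set_panel_assr(link, pipe_ctx, &panel_mode, true);  /* training */
        /* ... stream active ... */
        edp_set_panel_assr(link, pipe_ctx, &panel_mode, false); /* dpms off */
}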
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 34e521af7bb4..cb6d95cc36e4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm);
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
@@ -76,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
void edp_set_panel_power(struct dc_link *link, bool powerOn);
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable);
#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
index e3d729ab5b9f..caa617883f62 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -35,7 +35,7 @@
bool link_get_hpd_state(struct dc_link *link)
{
- uint32_t state;
+ uint32_t state = 0;
dal_gpio_lock_pin(link->hpd_gpio);
dal_gpio_get_value(link->hpd_gpio, &state);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 0e8f4f36c87c..5574bc628053 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -945,10 +945,19 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
- }
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
}
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
@@ -1383,6 +1392,9 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
+
+ s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
bool optc1_get_otg_active_size(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7fad..2f3bd7648ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -129,6 +129,8 @@ struct dcn_optc_registers {
uint32_t OTG_V_TOTAL_MID;
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL2;
uint32_t OTG_TRIGA_CNTL;
uint32_t OTG_TRIGA_MANUAL_TRIG;
uint32_t OTG_MANUAL_FLOW_CONTROL;
@@ -515,12 +517,15 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
+#define V_TOTAL_REGS(type)
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
type OTG_V_SYNC_MODE;\
type OTG_DRR_TRIGGER_WINDOW_START_X;\
type OTG_DRR_TRIGGER_WINDOW_END_X;\
type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\
+ V_TOTAL_REGS(type)\
type OTG_OUT_MUX;\
type OTG_M_CONST_DTO_PHASE;\
type OTG_M_CONST_DTO_MODULO;\
@@ -557,7 +562,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
@@ -580,7 +586,9 @@ struct dcn_optc_registers {
type OTG_CRC1_WINDOWB_X_END_READBACK;\
type OTG_CRC1_WINDOWB_Y_START_READBACK;\
type OTG_CRC1_WINDOWB_Y_END_READBACK;\
- type OPTC_FGCG_REP_DIS;
+ type OPTC_FGCG_REP_DIS;\
+ type OTG_V_COUNT_STOP;\
+ type OTG_V_COUNT_STOP_TIMER;
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 58bdbd859bf9..d6f095b4555d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -462,16 +462,6 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* Set the min/max selectors unconditionally so that
- * DMCUB fw may change OTG timings when necessary
- * TODO: Remove the w/a after fixing the issue in DMCUB firmware
- */
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_FORCE_LOCK_ON_EVENT, 0,
- OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 823493543325..52eab8fccb7f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
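REG_WAIT above polls the OTG double-buffer "update pending" field until it reads back 0, waiting roughly 2 us between polls for up to 50000 tries. A standalone sketch of that bounded poll loop, with a fake register accessor in place of the OTG read:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_pending = 3;

static unsigned int read_pending_field(void)
{
        if (fake_pending)
                fake_pending--; /* hardware eventually latches the update */
        return fake_pending;
}

static bool wait_field_clear(int delay_us, int max_try)
{
        int i;

        for (i = 0; i < max_try; i++) {
                if (read_pending_field() == 0)
                        return true;
                (void)delay_us; /* udelay(delay_us) in the kernel */
        }
        return false;
}

int main(void)
{
        printf("cleared: %d\n", wait_field_clear(2, 50000));
        return 0;
}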
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
@@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178cab0..0c2c14695561 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5b1547508850..d393be30dff8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -32,6 +32,7 @@
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
#define REG(reg)\
optc1->tg_regs->reg
@@ -213,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc,
return true;
}
+static void optc35_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ struct dc *dc;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ dc = optc->ctx->dc;
+
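+ /*
+ * With FW-assisted mclk switching (FAMS) available and enabled, DMCUB
+ * owns the DRR manual trigger, so route it through the DMUB service.
+ */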
+ if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
+ dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst);
+ else {
+ /*
+ * MIN_MASK_EN is gone and MASK is now always enabled.
+ *
+ * To get it to work with manual trigger, we need to make sure
+ * we program the correct bit.
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+ // Setup manual flow control for EOF via TRIG_A
+ if (optc->funcs && optc->funcs->setup_manual_trigger)
+ optc->funcs->setup_manual_trigger(optc);
+ }
+}
+
+void optc35_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1;
+ uint32_t max_otg_v_total;
+
+ if (!optc || !params)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, params->vertical_total_max - 1);
+ optc35_setup_manual_trigger(optc);
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+
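+ /*
+ * Standard DRR path: no long-vtotal stretch, so park V_COUNT_STOP at
+ * the HW maximum and clear the stop timer; optc35_set_long_vtotal()
+ * reprograms these when a frame longer than the HW limit is needed.
+ */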
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+}
+
+static void optc35_set_long_vtotal(
+ struct timing_generator *optc,
+ const struct long_vtotal_params *params)
+{
+ struct optc *optc1;
+ uint32_t vcount_stop_timer = 0, vcount_stop = 0;
+ uint32_t max_otg_v_total;
+
+ if (!optc || !params)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total)
+ return;
+
+ if (params->vertical_total_max == 0 || params->vertical_total_min == 0) {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ } else if (params->vertical_total_max == params->vertical_total_min) {
+ vcount_stop = params->vertical_blank_start;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
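+ /*
+ * Fixed long vtotal: stop the V counter at vertical blank start and
+ * hold it for (vertical_total_max - max_otg_v_total) lines via the
+ * stop timer, stretching the frame beyond the HW vtotal limit.
+ */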
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ } else {
+ // Variable rate, keep DRR trigger mask
+ if (params->vertical_total_min > max_otg_v_total) {
+ // cannot be supported
+ // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max,
+ // DRR trigger will drop the vtotal counting directly to a new frame.
+ // But it should trigger between v_total_min and v_total_max.
+ ASSERT(0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+ } else {
+ // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT
+ vcount_stop = params->vertical_total_min;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
+
+ // Example:
+ // params->vertical_total_min 1000
+ // params->vertical_total_max 2000
+ // MAX_OTG_V_COUNT_STOP = 1500
+ //
+ // If DRR event not happened,
+ // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999
+ // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1499
+ // vcount2 0,1,2,3,4,..499,
+ // else (DRR event happened, ex : at line 1004)
+ // time 0,1,2,3,4,...1000,1001.....1004, 0
+ // vcount 0,1,2,3,4....1000,.............. 0 (new frame)
+ // vcount2 0,1,2, 3, -
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, max_otg_v_total);
+ optc35_setup_manual_trigger(optc);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ }
+ }
+}
+
static struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -245,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr,
+ .set_drr = optc35_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
@@ -275,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
+ .set_long_vtotal = optc35_set_long_vtotal,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index 1f422e4c468f..d077e2392379 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -65,10 +65,14 @@
SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\
- SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh)
+ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable);
+void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
#endif /* __DC_OPTC_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 0a75ed8962a5..db9048974d74 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -102,10 +102,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21)
###############################################################################
-###############################################################################
-
-###############################################################################
-
RESOURCE_DCN30 = dcn30_resource.o
AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30))
@@ -194,6 +190,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35)
###############################################################################
+RESOURCE_DCN351 = dcn351_resource.o
+
+AMD_DAL_RESOURCE_DCN351 = $(addprefix $(AMDDALPATH)/dc/resource/dcn351/,$(RESOURCE_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
+
###############################################################################
endif
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index d1edac46c9a0..88afb2a30eef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll(
default:
return NULL;
}
-
- return NULL;
}
static enum dc_status build_mapped_resource(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 20662edd0ae4..621825a51f46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -1060,7 +1060,7 @@ static bool dce120_resource_construct(
struct irq_service_init_data irq_init_data;
static const struct resource_create_funcs *res_funcs;
bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
- uint32_t pipe_fuses;
+ uint32_t pipe_fuses = 0;
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 35a2cce0c2b8..56ee45e12b46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -56,7 +56,6 @@
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
-/* TODO remove this include */
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index d08d10969251..563c5eec83ff 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -513,7 +513,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = true
+ .p010 = false
},
.max_upscale_factor = {
@@ -569,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_legacy_fast_update = true,
};
static void dcn10_dpp_destroy(struct dpp **dpp)
@@ -1631,6 +1632,7 @@ static bool dcn10_resource_construct(
/* valid pipe num */
pool->base.pipe_count = j;
pool->base.timing_generator_count = j;
+ pool->base.mpcc_count = j;
/* within dml lib, it is hard code to 4. If ASIC pipe is fused,
* the value may be changed
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index f9c5bc624be3..0a939437e19f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
@@ -64,6 +62,9 @@
#include "dcn20/dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
+#include "dcn20/dcn20_dwb.h"
+#include "dcn20/dcn20_mmhubbub.h"
+
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -73,9 +74,6 @@
#include "nbio/nbio_2_3_offset.h"
-#include "dcn20/dcn20_dwb.h"
-#include "dcn20/dcn20_mmhubbub.h"
-
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
@@ -85,11 +83,10 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
-#include "link_enc_cfg.h"
-
-#include "amdgpu_socbb.h"
+#include "link_enc_cfg.h"
#include "link.h"
+
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -1284,8 +1281,13 @@ void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
- dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool;
+
+ /* Prefer the pool-specific hook when one is provided; otherwise fall
+ * back to the generic DCN20 pixel clock parameter builder.
+ */
+ if (pool->funcs->build_pipe_pix_clk_params) {
+ pool->funcs->build_pipe_pix_clk_params(pipe_ctx);
+ } else {
+ dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ }
pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
@@ -2451,6 +2453,7 @@ static bool dcn20_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 914b234d7f6b..070a4efb308b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -55,7 +55,6 @@
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
-#include "dcn201/dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
@@ -182,6 +181,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.socclk_mhz = 1254.0,
.dram_speed_mts = 14000.0,
},
+ /* state4 is not an actual state; it just marks the entry as unsupported for DML */
{
.state = 4,
.dscclk_mhz = 400.0,
@@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = {
.num_audio = 2,
.num_stream_encoder = 2,
.num_pll = 2,
+ .num_dwb = 0,
+ .num_dsc = 0,
.num_ddc = 2,
};
@@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_tri_buf = false,
+ .enable_tri_buf = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 65d337731f56..8663cbc3d1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = {
.num_dsc = 3,
};
-#ifdef DIAGS_BUILD
-static const struct resource_caps res_cap_rn_FPGA_4pipe = {
- .num_timing_generator = 4,
- .num_opp = 4,
- .num_video_plane = 4,
- .num_audio = 7,
- .num_stream_encoder = 4,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 0,
-};
-
-static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
- .num_timing_generator = 2,
- .num_opp = 2,
- .num_video_plane = 2,
- .num_audio = 7,
- .num_stream_encoder = 2,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 2,
-};
-#endif
-
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
@@ -1415,16 +1389,11 @@ static bool dcn21_resource_construct(
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
- uint32_t num_pipes;
+ uint32_t num_pipes = 0;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_rn;
-#ifdef DIAGS_BUILD
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc;
- pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
-#endif
pool->base.funcs = &dcn21_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 37a64186f324..f35cc307830b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw(
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx, vlevel = 0;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
ASSERT(pipes);
@@ -2050,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
@@ -2169,6 +2172,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * If it just so happens that the memory bandwidth is low enough such that
+ * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA
+ * target, we need to populate the optimal UCLK for each DCFCLK STA target to
+ * be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 25cd6236b054..8bc1bcaeaa47 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1143,7 +1143,7 @@ static bool dcn303_resource_construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
- struct ddc_service_init_data ddc_init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 31035fc3d868..d4c3e2754f51 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -75,7 +75,6 @@
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
-// TODO: change include headers /amd/include/asic_reg after upstream
#include "yellow_carp_offset.h"
#include "dcn/dcn_3_1_2_offset.h"
#include "dcn/dcn_3_1_2_sh_mask.h"
@@ -892,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
.using_dml2 = false,
};
@@ -1311,6 +1310,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1645,7 +1646,7 @@ int dcn31_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool upscaled = false;
DC_FP_START();
@@ -1767,11 +1768,14 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
fast_validate = false;
@@ -1941,8 +1945,6 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
- dc->config.use_old_fixed_vs_sequence = true;
-
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index c97391edb5ff..ff50f43e4c00 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -925,27 +925,10 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
+ .enable_legacy_fast_update = true,
.using_dml2 = false,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1384,6 +1367,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1744,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
if (filter_modes_for_single_channel_workaround(dc, context))
goto validate_fail;
@@ -1938,8 +1926,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
/* Disable pipe power gating */
dc->debug.disable_dpp_power_gate = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 515ba435f759..4ce0f4bf1d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index b9753d4606f8..5fd52c5fcee4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1306,6 +1306,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1614,7 +1616,7 @@ static int dcn316_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 6f10052caeef..abd76345d1e4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1304,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1751,6 +1753,9 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
@@ -1771,6 +1776,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1798,7 +1804,9 @@ bool dcn32_validate_bandwidth(struct dc *dc,
bool out = false;
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
else
out = dml1_validate(dc, context, fast_validate);
return out;
@@ -1814,9 +1822,48 @@ int dcn32_populate_dml_pipes_from_context(
struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
+ int subvp_main_pipe_index = -1;
+ enum mall_stream_type mall_type;
+ bool single_display_subvp = false;
+ struct dc_stream_state *stream = NULL;
+ int num_subvp_main = 0;
+ int num_subvp_phantom = 0;
+ int num_subvp_none = 0;
+ int odm_slice_count;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* For single display subvp, look for subvp main so if we have phantom
+ * pipe, we can set odm policy to match main pipe
+ */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+ mall_type = dc_state_get_stream_subvp_type(context, stream);
+ if (mall_type == SUBVP_MAIN)
+ num_subvp_main++;
+ else if (mall_type == SUBVP_PHANTOM)
+ num_subvp_phantom++;
+ else
+ num_subvp_none++;
+ }
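+ /* Single-display SubVP: exactly one main/phantom pair and no other streams */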
+ if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0)
+ single_display_subvp = true;
+
+ if (single_display_subvp) {
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &res_ctx->pipe_ctx[i];
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (mall_type == SUBVP_MAIN) {
+ if (resource_is_pipe_type(pipe, OTG_MASTER))
+ subvp_main_pipe_index = i;
+ }
+ pipe_cnt++;
+ }
+ }
+
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1831,7 +1878,21 @@ int dcn32_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
if (dc->config.enable_windowed_mpo_odm &&
dc->debug.enable_single_display_2to1_odm_policy) {
- switch (resource_get_odm_slice_count(pipe)) {
+ /* For single display subvp, if pipe is phantom pipe,
+ * then copy odm policy from subvp main pipe
+ */
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) {
+ if (subvp_main_pipe_index < 0) {
+ odm_slice_count = -1;
+ ASSERT(0);
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]);
+ }
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(pipe);
+ }
+ switch (odm_slice_count) {
case 2:
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
break;
@@ -1844,6 +1905,7 @@ int dcn32_populate_dml_pipes_from_context(
} else {
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
}
+
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
@@ -1911,6 +1973,22 @@ int dcn32_populate_dml_pipes_from_context(
return pipe_cnt;
}
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes)
+{
+ uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+ /* add 2 lines for worst case alignment */
+ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+ total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+ lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
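+ /*
+ * Illustrative math (example values, not fixed caps): with 64-byte
+ * cache lines, 16 ways and a 32 MiB CAB, lines_per_way =
+ * (32 MiB / 64) / 16 = 32768; a 4 MiB allocation needs 65536 + 2
+ * lines and therefore rounds up to 3 ways.
+ */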
+ num_ways = cache_lines_used / lines_per_way;
+ if (cache_lines_used % lines_per_way > 0)
+ num_ways++;
+
+ return num_ways;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = dcn32_subvp_in_use,
@@ -1928,8 +2006,20 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+
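+ /*
+ * Reinitialize both DML2 contexts against the new bounding box: one
+ * with AC clock limits, one with DC (battery) power-source limits.
+ */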
+ dml2_opt.use_clock_dc_limits = false;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
@@ -1957,6 +2047,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2045,7 +2136,8 @@ static bool dcn32_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
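+ /* e.g. 4 MB per channel on an 8-channel configuration gives 32 MiB */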
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
@@ -2118,6 +2210,7 @@ static bool dcn32_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.dc_mode_clk_limit_support = true;
+ dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2358,30 +2451,10 @@ static bool dcn32_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
@@ -2479,7 +2552,7 @@ struct resource_pool *dcn32_create_resource_pool(
* full update which delays the flip for 1 frame. If we use the original pipe
* we don't have to toggle its power. So we can flip faster.
*/
-static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
const struct resource_pool *pool,
@@ -2662,7 +2735,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
- free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 0c87b0fabba7..fee67fbab8e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -42,6 +42,7 @@
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -112,10 +113,6 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel);
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes);
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -140,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *new_opp_head);
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
@@ -181,6 +184,10 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 74412e5f03fe..e4b360d89b3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1288,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1579,8 +1581,20 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+
+ dml2_opt.use_clock_dc_limits = false;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
@@ -1608,6 +1622,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1695,7 +1710,9 @@ static bool dcn321_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
@@ -1760,6 +1777,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->config.dc_mode_clk_limit_support = true;
+ dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1995,30 +2013,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 5fdcda8f8602..2df8a742516c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250, /* 4:1 downscaling ratio: 1000/4 = 250 */
.nv12 = 167,
.fp16 = 167
},
@@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
.disable_optc_power_gate = true, /*should the same as above two*/
- .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
+ .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -764,11 +764,12 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
- .enable_single_display_2to1_odm_policy = false,
+ .enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
@@ -782,6 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
};
@@ -1366,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1732,7 +1736,9 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
{
bool out = false;
- out = dml2_validate(dc, context, fast_validate);
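+ /* Validate against the DML2 context that matches the active power source */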
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
if (fast_validate)
return out;
@@ -1905,7 +1911,8 @@ static bool dcn35_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
-
+ /* HW default is all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2135,15 +2142,9 @@ static bool dcn35_resource_construct(
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index a51c4a9eaafe..f97bb4cb3761 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -240,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool(
SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\
SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\
SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
new file mode 100644
index 000000000000..ddf9560ab772
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -0,0 +1,2163 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+#include "dcn351/dcn351_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn351_resource.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn35/dcn35_hubbub.h"
+#include "dcn32/dcn32_mpc.h"
+#include "dcn35/dcn35_hubp.h"
+#include "irq/dcn351/irq_service_dcn351.h"
+#include "dcn35/dcn35_dpp.h"
+#include "dcn35/dcn35_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn35/dcn35_opp.h"
+#include "dcn35/dcn35_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn32/dcn32_hpo_dp_link_encoder.h"
+#include "link.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn32/dcn32_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn35/dcn35_dccg.h"
+#include "dcn35/dcn35_pg_cntl.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31/dcn31_panel_cntl.h"
+#include "dcn35/dcn35_hwseq.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
+#include "dml/dcn31/dcn31_fpu.h" /*todo*/
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dcn351/dcn351_fpu.h"
+#include "dcn35/dcn35_dwb.h"
+#include "dcn35/dcn35_mmhubbub.h"
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+#include "nbio/nbio_7_11_0_offset.h"
+#include "mmhub/mmhub_3_3_0_offset.h"
+#include "mmhub/mmhub_3_3_0_sh_mask.h"
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "dml2/dml2_wrapper.h"
+
+#include "link_enc_cfg.h"
+#define DC_LOGGER_INIT(logger)
+
+enum dcn351_clk_src_array_id {
+ DCN351_CLK_SRC_PLL0,
+ DCN351_CLK_SRC_PLL1,
+ DCN351_CLK_SRC_PLL2,
+ DCN351_CLK_SRC_PLL3,
+ DCN351_CLK_SRC_PLL4,
+ DCN351_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header files
+ */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
+
+#define SRI(reg_name, block, id)\
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
+
+static struct bios_registers bios_regs;
+
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK)
+};
+
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
+
+static struct dce_abm_registers abm_regs[4];
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
+
+static struct dce_audio_registers audio_regs[7];
+
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs_init(id)\
+ VPG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_vpg_registers vpg_regs[10];
+
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs_init(id)\
+ AFMT_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_afmt_registers afmt_regs[6];
+
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_apg_registers apg_regs[4];
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs_init(id)\
+ SE_DCN35_REG_LIST_RI(id)
+
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
+
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
+
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN35_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
+
+static struct dcn10_link_enc_registers link_enc_regs[5];
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \
+ //DPCS_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \
+ //DPCS_DCN31_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
+
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn3_dpp_registers dpp_regs[4];
+
+static const struct dcn35_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn35_opp_registers opp_regs[4];
+
+static const struct dcn35_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \
+ )
+
+static struct dce110_aux_registers aux_engine_regs[5];
+
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dcn30_dwbc_registers dwbc35_regs[1];
+
+static const struct dcn35_dwbc_shift dwbc35_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dwbc_mask dwbc35_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn35_mmhubbub_registers mcif_wb35_regs[1];
+
+static const struct dcn35_mmhubbub_shift mcif_wb35_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn35_mmhubbub_mask mcif_wb35_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define dsc_regsDCN35_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
+
+static struct dcn20_dsc_registers dsc_regs[4];
+
+static const struct dcn35_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN30_RI(id)
+
+static struct dcn_hubp2_registers hubp_regs[4];
+
+
+static const struct dcn35_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dcn_hubbub_registers hubbub_reg;
+
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN35(0)
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN35()
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct pg_cntl_registers pg_cntl_regs;
+
+#define pg_cntl_dcn35_regs_init() \
+ PG_CNTL_REG_LIST_DCN35()
+
+static const struct pg_cntl_shift pg_cntl_shift = {
+ PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct pg_cntl_mask pg_cntl_mask = {
+ PG_CNTL_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define SRII2(reg_name_pre, reg_name_post, id)\
+ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
+ ## id ## _ ## reg_name_post ## _BASE_IDX) + \
+ reg ## reg_name_pre ## id ## _ ## reg_name_post
+
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN35_REG_LIST()
+
+#define HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN35_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
+
+static struct dcn_vmid_registers vmid_regs[16];
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn351 = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
+ .num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 2,
+ .num_dsc = 4,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true,
+ .ayuv = false,
+ },
+
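+	/* Scale factors below are in units of 1/1000: 16000 => 16:1 upscale */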
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ // 6:1 downscaling ratio: 1000/6 = 166.666
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 167,
+ .fp16 = 167
+ },
+	.min_width = 64,
+	.min_height = 64
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+	.disable_optc_power_gate = true, /* should be the same as the two flags above */
+	.disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
+ .disable_clock_gate = false,
+ .disable_dsc_power_gate = true,
+ .vsr_support = true,
+ .performance_trace = false,
+	.max_downscale_src_width = 4096, /* up to true 4K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+	.dwb_fi_phase = -1, // -1 = disabled
+ .dmub_command_table = true,
+ .pstate_enabled = true,
+ .use_max_lb = true,
+ .enable_mem_low_power = {
+ .bits = {
+ .vga = false,
+ .i2c = true,
+			.dmcu = false, // previously known to cause a hang on S3 cycles if enabled
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
+ }
+ },
+ .root_clock_optimization = {
+ .bits = {
+ .dpp = true,
+ .dsc = true,/*dscclk and dsc pg*/
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = true,
+ .dpiasymclk = true,
+ }
+ },
+ .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
+	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
+ .minimum_z8_residency_time = 2100,
+ .using_dml2 = true,
+ .support_eDP1_5 = true,
+ .enable_hpo_pg_support = false,
+ .enable_legacy_fast_update = true,
+ .enable_single_display_2to1_odm_policy = true,
+ .disable_idle_power_optimizations = false,
+ .dmcub_emulation = false,
+ .disable_boot_optimizations = false,
+ .disable_unbounded_requesting = false,
+ .disable_mem_low_power = false,
+ //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
+ .enable_double_buffered_dsc_pg_support = true,
+ .enable_dp_dig_pixel_rate_div_policy = 1,
+ .disable_z10 = false,
+ .ignore_pg = true,
+ .psp_disabled_wa = true,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+ .psr = {
+ .disable_psr = false,
+ .disallow_psrsu = false,
+ .disallow_replay = false,
+ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
+static void dcn35_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+ bool success = (dpp != NULL);
+
+ if (!success)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
+ success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift,
+ &tf_mask);
+ if (success) {
+ dpp35_set_fgcg(
+ dpp,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp);
+ return &dpp->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct output_pixel_processor *dcn35_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
+ dcn35_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+
+ dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp);
+
+ return &opp->base;
+}
+
+static struct dce_aux *dcn31_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+/* ========================================================== */
+
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ * 0 | C | 0
+ * 1 | First Available | 0
+ * 2 | D | 1
+ * 3 | First Available | 1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
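+/*
+ * Note: no bounds checking is done here; callers are expected to pass a
+ * dpia_index below ARRAY_SIZE(dpia_to_preferred_enc_id_table) (4 entries).
+ */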
+static enum engine_id dcn351_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
+static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
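+	/* The I2C init ids below are 1-based; the RI macros store into
+	 * array entry (id - 1), matching the 0-based inst used in the
+	 * construct call.
+	 */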
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+static struct mpc *dcn35_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
+ dcn32_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
+ hubbub35_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask,
+ 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/
+ 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/
+ 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/);
+
+
+ for (i = 0; i < res_cap_dcn351.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+static struct timing_generator *dcn35_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn35_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn35_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
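+	/* enc_init_data->channel is a 1-based DDC channel id, hence the
+	 * "- 1" when indexing the 0-based aux register array below.
+	 */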
+ dcn35_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+/* Create a minimal link encoder object not associated with a particular
+ * physical connector.
+ * resource_funcs.link_enc_create_minimal
+ */
+static struct link_encoder *dcn31_link_enc_create_minimal(
+ struct dc_context *ctx, enum engine_id eng_id)
+{
+ struct dcn20_link_encoder *enc20;
+
+ if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
+ return NULL;
+
+ enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct_minimal(
+ enc20,
+ ctx,
+ &link_enc_feature,
+ &link_enc_regs[eng_id - ENGINE_ID_DIGA],
+ eng_id);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dcn31_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dcn31_panel_cntl_construct(panel_cntl, init_data);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn31_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+	audio_regs_init(4),
+	audio_regs_init(5),
+	audio_regs_init(6);
+
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn31_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
+
+ if (!vpg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
+ vpg31_construct(vpg31, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg31->base;
+}
+
+static struct afmt *dcn31_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
+
+ if (!afmt31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
+ afmt31_construct(afmt31, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
+}
+
+static struct stream_encoder *dcn35_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ afmt = dcn31_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
+ dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn351_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn31_create_audio,
+ .create_stream_encoder = dcn35_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
+ .create_hwseq = dcn351_hwseq_create,
+};
+
+static void dcn351_resource_destruct(struct dcn351_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn35_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
+ if (pool->base.pg_cntl != NULL)
+ dcn_pg_cntl_destroy(&pool->base.pg_cntl);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn35_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
+ if (hubp35_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx)
+{
+ dcn35_dwbc_set_fgcg(
+ dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb);
+}
+
+static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
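+		/* res_cap_dcn351.num_dwb is 1, so only instance 0 needs init */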
+#undef REG_STRUCT
+#define REG_STRUCT dwbc35_regs
+ dwbc_regs_dcn3_init(0);
+
+ dcn35_dwbc_construct(dwbc30, ctx,
+ &dwbc35_regs[i],
+ &dwbc35_shift,
+ &dwbc35_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+
+ dcn35_dwbc_init(dwbc30, ctx);
+ }
+ return true;
+}
+
+static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx)
+{
+ dcn35_mmhubbub_set_fgcg(
+ mcif_wb30,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub);
+}
+
+static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb35_regs
+ mcif_wb_regs_dcn3_init(0);
+
+ dcn35_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb35_regs[i],
+ &mcif_wb35_shift,
+ &mcif_wb35_mask,
+ i);
+
+ dcn35_mmhubbub_init(mcif_wb30, ctx);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn35_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN35_init(0),
+ dsc_regsDCN35_init(1),
+ dsc_regsDCN35_init(2),
+ dsc_regsDCN35_init(3);
+
+ dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ dsc35_set_fgcg(dsc,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc);
+ return &dsc->base;
+}
+
+static void dcn351_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn351_resource_pool *dcn351_pool = TO_DCN351_RES_POOL(*pool);
+
+ dcn351_resource_destruct(dcn351_pool);
+ kfree(dcn351_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn35_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
+
+static bool dcn351_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
+
+ if (fast_validate)
+ return out;
+
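+	/* Full validation only: decide Z-state support with the FPU enabled */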
+ DC_FP_START();
+ dcn35_decide_zstate_support(dc, context);
+ DC_FP_END();
+
+ return out;
+}
+
+static struct resource_funcs dcn351_res_pool_funcs = {
+ .destroy = dcn351_destroy_resource_pool,
+ .link_enc_create = dcn35_link_encoder_create,
+ .link_enc_create_minimal = dcn31_link_enc_create_minimal,
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+ .validate_bandwidth = dcn351_validate_bandwidth,
+ .calculate_wm_and_dlg = NULL,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .release_pipe = dcn20_release_pipe,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
+};
+
+static bool dcn351_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn351_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn351;
+
+ pool->base.funcs = &dcn351_res_pool_funcs;
+
+ /*************************************************
+	 *  Resource + ASIC cap hardcoding               *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ if (dc->config.forceHBR2CP2520)
+ dc->caps.force_dp_tps4_for_cp2520 = false;
+ dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
+
+ dc->caps.edp_dsc_support = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
+ dc->caps.seamless_odm = true;
+
+ dc->caps.zstate_support = true;
+ dc->caps.ips_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+ // no OGAM ROM on DCN301
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+	/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+	 * to provide some margin.
+	 * Future ASICs are expected to have an equal or higher value, in order to
+	 * have a deterministic power improvement from generation to generation
+	 * (i.e., we should not expect a new ASIC generation with a lower vmin rate).
+	 */
+ dc->caps.max_disp_clock_khz_at_vmin = 650000;
+
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
+ /* Use psp mailbox to enable assr */
+ dc->config.use_assr_psp_message = true;
+
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+	/* HW default is to have all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL0] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL1] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL2] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL3] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL4] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+
+ pool->base.clk_src_count = DCN351_CLK_SRC_TOTAL;
+
+	/* TODO: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+	/* temporary until dml2 fully works without dml1 */
+ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31);
+
+ /* TODO: DCCG */
+ pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT pg_cntl_regs
+ pg_cntl_dcn35_regs_init();
+
+ pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask);
+ if (pool->base.pg_cntl == NULL) {
+ dm_error("DC: failed to create power gate control!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* TODO: IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn351_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn35_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn35_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn35_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn35_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn35_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* MPC and DSC */
+ pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn35_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn35_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn35_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ /* DCN3.5 has 6 DPIA */
+ pool->base.usb4_dpia_count = 4;
+ if (dc->debug.dpia_debug.bits.disable_dpia)
+ pool->base.usb4_dpia_count = 0;
+
+ /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn351_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
+ dc->dml2_options.use_native_pstate_optimization = true;
+ dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
+ if (dc->config.EnableMinDispClkODM)
+ dc->dml2_options.minimize_dispclk_using_odm = true;
+ dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
+
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
+ dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
+
+ dc->dml2_options.max_segments_per_hubp = 24;
+ dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+
+ if (dc->config.sdpif_request_limit_words_per_umc == 0)
+ dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
+
+ return true;
+
+create_fail:
+
+ dcn351_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn351_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn351_resource_pool *pool =
+ kzalloc(sizeof(struct dcn351_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn351_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h
new file mode 100644
index 000000000000..f3e045777a3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef _DCN351_RESOURCE_H_
+#define _DCN351_RESOURCE_H_
+
+#include "core_types.h"
+
+extern struct _vcs_dpi_ip_params_st dcn3_51_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc;
+
+#define TO_DCN351_RES_POOL(pool)\
+ container_of(pool, struct dcn351_resource_pool, base)
+
+struct dcn351_resource_pool {
+ struct resource_pool base;
+};
+
+struct resource_pool *dcn351_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#endif /* _DCN351_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c78c9224ab60..2fde1f043d50 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -71,6 +71,8 @@
extern "C" {
#endif
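+/* Number of program counter snapshots captured in dmub_diagnostic_data */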
+#define DMUB_PC_SNAPSHOT_COUNT 10
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
@@ -78,6 +80,12 @@ struct dmub_srv_dcn31_regs;
struct dmcub_trace_buf_entry;
+/* enum dmub_window_memory_type - memory location type specification for windows */
+enum dmub_window_memory_type {
+ DMUB_WINDOW_MEMORY_TYPE_FB = 0,
+ DMUB_WINDOW_MEMORY_TYPE_GART
+};
+
/* enum dmub_status - return code for dmcub functions */
enum dmub_status {
DMUB_STATUS_OK = 0,
@@ -106,6 +114,7 @@ enum dmub_asic {
DMUB_ASIC_DCN32,
DMUB_ASIC_DCN321,
DMUB_ASIC_DCN35,
+ DMUB_ASIC_DCN351,
DMUB_ASIC_MAX,
};
@@ -119,6 +128,7 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_TOTAL,
};
@@ -203,7 +213,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
- bool is_mailbox_in_inbox;
+ const enum dmub_window_memory_type *window_memory_type;
};
/**
@@ -223,7 +233,7 @@ struct dmub_srv_region_params {
*/
struct dmub_srv_region_info {
uint32_t fb_size;
- uint32_t inbox_size;
+ uint32_t gart_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
@@ -239,9 +249,10 @@ struct dmub_srv_region_info {
struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
void *cpu_fb_addr;
- void *cpu_inbox_addr;
+ void *cpu_gart_addr;
uint64_t gpu_fb_addr;
- uint64_t gpu_inbox_addr;
+ uint64_t gpu_gart_addr;
+ const enum dmub_window_memory_type *window_memory_type;
};
/**
@@ -286,18 +297,30 @@ struct dmub_srv_hw_params {
bool dpia_hpd_int_enable_supported;
bool disable_clock_gate;
bool disallow_dispclk_dppclk_ds;
+ bool ips_sequential_ono;
enum dmub_memory_access_type mem_access_type;
enum dmub_ips_disable_type disable_ips;
};
/**
+ * struct dmub_srv_debug - Debug info for dmub_srv
+ * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub
+ * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
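+ * @timestamp: time at which the first timeout occurred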
+ */
+struct dmub_srv_debug {
+ bool timeout_occured;
+ union dmub_rb_cmd timeout_cmd;
+ unsigned long long timestamp;
+};
+
+/**
* struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for
* debugging purposes, including logging, crash analysis, etc.
*/
struct dmub_diagnostic_data {
uint32_t dmcub_version;
uint32_t scratch[17];
- uint32_t pc;
+ uint32_t pc[DMUB_PC_SNAPSHOT_COUNT];
uint32_t undefined_address_fault_addr;
uint32_t inst_fetch_fault_addr;
uint32_t data_write_fault_addr;
@@ -308,6 +331,7 @@ struct dmub_diagnostic_data {
uint32_t inbox0_wptr;
uint32_t inbox0_size;
uint32_t gpint_datain0;
+ struct dmub_srv_debug timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -361,7 +385,8 @@ struct dmub_srv_hw_funcs {
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
@@ -443,7 +468,6 @@ struct dmub_srv_create_params {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs *hw_funcs;
void *user_ctx;
- struct dc_context *dc_ctx;
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
@@ -455,6 +479,7 @@ struct dmub_srv_create_params {
* @user_ctx: user provided context for the dmub_srv
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
+ * @shared_state: dmub shared state between firmware and driver
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
@@ -463,6 +488,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -495,6 +521,7 @@ struct dmub_srv {
struct dmub_visual_confirm_color visual_confirm_color;
enum dmub_srv_power_state_type power_state;
+ struct dmub_srv_debug debug;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e699731ee68e..e85fd3ac52c7 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -26,15 +26,6 @@
#ifndef DMUB_CMD_H
#define DMUB_CMD_H
-#if defined(_TEST_HARNESS) || defined(FPGA_USB4)
-#include "dmub_fw_types.h"
-#include "include_legacy/atomfirmware.h"
-
-#if defined(_TEST_HARNESS)
-#include <string.h>
-#endif
-#else
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -42,8 +33,6 @@
#include "atomfirmware.h"
-#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
@@ -108,6 +97,9 @@
/* Maximum number of planes on any ASIC. */
#define DMUB_MAX_PLANES 6
+/* Maximum number of phantom planes on any ASIC */
+#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
+
/* Trace buffer offset for entry */
#define TRACE_BUFFER_ENTRY_OFFSET 16
@@ -205,6 +197,11 @@ union abm_flags {
* of user backlight level.
*/
unsigned int abm_gradual_bl_change : 1;
+
+ /**
+	 * @abm_new_frame: Indicates if a new frame update is needed for ABM to ramp up into steady state
+ */
+ unsigned int abm_new_frame : 1;
} bitfields;
unsigned int u32All;
@@ -403,15 +400,16 @@ union replay_debug_flags {
/**
* 0x400 (bit 10)
- * @force_disable_ips1: Force disable IPS1 state
+ * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS
+ * If we enter IPS2, the Visual confirm bar will change to yellow
*/
- uint32_t force_disable_ips1 : 1;
+ uint32_t enable_ips_visual_confirm : 1;
/**
* 0x800 (bit 11)
- * @force_disable_ips2: Force disable IPS2 state
+ * @enable_ips_residency_profiling: Enable IPS residency profiling
*/
- uint32_t force_disable_ips2 : 1;
+ uint32_t enable_ips_residency_profiling : 1;
uint32_t reserved : 20;
} bitfields;
@@ -471,7 +469,7 @@ struct dmub_feature_caps {
* Max PSR version supported by FW.
*/
uint8_t psr;
- uint8_t fw_assisted_mclk_switch;
+ uint8_t fw_assisted_mclk_switch_ver;
uint8_t reserved[4];
uint8_t subvp_psr_support;
uint8_t gecc_enable;
@@ -518,6 +516,8 @@ struct dmub_visual_confirm_color {
* @trace_buffer_size: size of the tracebuffer region
* @fw_version: the firmware version information
* @dal_fw: 1 if the firmware is DAL
+ * @shared_state_size: size of the shared state region in bytes
+ * @shared_state_features: number of shared state features
*/
struct dmub_fw_meta_info {
uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
@@ -526,6 +526,9 @@ struct dmub_fw_meta_info {
uint32_t fw_version; /**< the firmware version information */
uint8_t dal_fw; /**< 1 if the firmware is DAL */
uint8_t reserved[3]; /**< padding bits */
+ uint32_t shared_state_size; /**< size of the shared state region in bytes */
+ uint16_t shared_state_features; /**< number of shared state features */
+ uint16_t reserved2; /**< padding bytes */
};
/**
@@ -624,6 +627,7 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2 = 3,
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
+ DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -658,6 +662,7 @@ union dmub_fw_boot_options {
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
uint32_t ips_disable: 3; /* options to disable ips support*/
+ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t reserved : 9; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
@@ -670,6 +675,123 @@ enum dmub_fw_boot_options_bit {
};
//==============================================================================
+//< DMUB_SHARED_STATE>==========================================================
+//==============================================================================
+
+/**
+ * Shared firmware state between driver and firmware for lockless communication
+ * in situations where the inbox/outbox may be unavailable.
+ *
+ * Each structure *must* be at most 256 bytes in size. The layout is allocated
+ * as described below:
+ *
+ * [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
+ */
+
+/**
+ * enum dmub_shared_state_feature_id - List of shared state features.
+ */
+enum dmub_shared_state_feature_id {
+ DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
+ DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
+ DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
+ DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
+};
+
+/**
+ * union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
+ */
+union dmub_shared_state_ips_fw_signals {
+ struct {
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
+ uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
+		uint32_t reserved_bits : 29; /**< Reserved */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
+ */
+union dmub_shared_state_ips_driver_signals {
+ struct {
+ uint32_t allow_pg : 1; /**< 1 if PG is allowed */
+		uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
+		uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
+		uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
+		uint32_t reserved_bits : 28; /**< Reserved bits */
+ } bits;
+ uint32_t all;
+};
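
Together the two unions form the lockless handshake: the driver publishes its
allow_* bits and the firmware answers with commit bits, with no inbox traffic
required. A minimal sketch of the two halves, assuming only the declarations
above (memory-barrier and caching details omitted):

/* Sketch: driver side publishes which idle states it permits. */
static void driver_allow_ips(volatile union dmub_shared_state_ips_driver_signals *drv,
			     bool ips1, bool ips2)
{
	drv->bits.allow_ips1 = ips1;
	drv->bits.allow_ips2 = ips2;
}

/* Sketch: driver side checks whether firmware committed to IPS. */
static bool fw_in_ips(const volatile union dmub_shared_state_ips_fw_signals *fw)
{
	return fw->bits.ips1_commit || fw->bits.ips2_commit;
}
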
+
+/**
+ * IPS FW Version
+ */
+#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_fw - Firmware state for IPS.
+ */
+struct dmub_shared_state_ips_fw {
+ union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t rcg_entry_count; /**< Entry counter for RCG */
+ uint32_t rcg_exit_count; /**< Exit counter for RCG */
+ uint32_t ips1_entry_count; /**< Entry counter for IPS1 */
+ uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
+ uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
+ uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
+	uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * IPS Driver Version
+ */
+#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_driver - Driver state for IPS.
+ */
+struct dmub_shared_state_ips_driver {
+ union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
+	uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_common - Generic payload.
+ */
+struct dmub_shared_state_feature_common {
+ uint32_t padding[62];
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_header - Feature description.
+ */
+struct dmub_shared_state_feature_header {
+ uint16_t id; /**< Feature ID */
+ uint16_t version; /**< Feature version */
+ uint32_t reserved; /**< Reserved bytes. */
+}; /* 8 bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_block - Feature block.
+ */
+struct dmub_shared_state_feature_block {
+ struct dmub_shared_state_feature_header header; /**< Shared state header. */
+ union dmub_shared_feature_state_union {
+ struct dmub_shared_state_feature_common common; /**< Generic data */
+ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
+ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
+ } data; /**< Shared state data. */
+}; /* 256-bytes, fixed */
+
+/**
+ * Shared state size in bytes.
+ */
+#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
+ ((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
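
Because every block is a fixed 256 bytes, a feature payload can be located by
indexing from the shared state base and validating the firmware-written
header. A minimal sketch (helper name hypothetical), assuming blocks are laid
out in feature-ID order as the layout comment above suggests:

/* Sketch: index a feature block and sanity-check its header.
 * Assumes one 256-byte block per feature ID, in ID order. */
static volatile struct dmub_shared_state_feature_block *
dmub_shared_state_get_feature(volatile struct dmub_shared_state_feature_block *base,
			      enum dmub_shared_state_feature_id id)
{
	if (id == DMUB_SHARED_SHARE_FEATURE__INVALID ||
	    id >= DMUB_SHARED_STATE_FEATURE__LAST)
		return NULL;

	/* header.id is written by the firmware; verify before use. */
	return base[id].header.id == id ? &base[id] : NULL;
}
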
+
+//==============================================================================
//</DMUB_STATUS>================================================================
//==============================================================================
//< DMUB_VBIOS>=================================================================
@@ -707,6 +829,10 @@ enum dmub_cmd_vbios_type {
*/
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
/**
+ * Control PHY FSM
+ */
+ DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29,
+ /**
* Controls domain power gating
*/
DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
@@ -1081,6 +1207,11 @@ enum dmub_cmd_type {
*/
DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+ /**
+ * Command type used for all PSP commands.
+ */
+ DMUB_CMD__PSP = 88,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1270,11 +1401,11 @@ struct dmub_cmd_PLAT_54186_wa {
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */
struct {
- uint8_t hubp_inst : 4; /**< HUBP instance */
- uint8_t tmz_surface : 1; /**< TMZ enable or disable */
- uint8_t immediate :1; /**< Immediate flip */
- uint8_t vmid : 4; /**< VMID */
- uint8_t grph_stereo : 1; /**< 1 if stereo */
+ uint32_t hubp_inst : 4; /**< HUBP instance */
+ uint32_t tmz_surface : 1; /**< TMZ enable or disable */
+ uint32_t immediate :1; /**< Immediate flip */
+ uint32_t vmid : 4; /**< VMID */
+ uint32_t grph_stereo : 1; /**< 1 if stereo */
uint32_t reserved : 21; /**< Reserved */
} flip_params; /**< Pageflip parameters */
uint32_t reserved[9]; /**< Reserved bits */
@@ -1483,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t pad[1];
+ uint8_t reserved[59];
};
/**
@@ -2204,6 +2335,11 @@ enum phy_link_rate {
* UHBR10 - 20.0 Gbps/Lane
*/
PHY_RATE_2000 = 11,
+
+	/**
+	 * Rate 12 - 6.75 Gbps/Lane
+	 */
+	PHY_RATE_675 = 12,
};
/**
@@ -2222,6 +2358,7 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_POWER_DOWN,
DMUB_PHY_FSM_PLL_EN,
DMUB_PHY_FSM_TX_EN,
+ DMUB_PHY_FSM_TX_EN_TEST_MODE,
DMUB_PHY_FSM_FAST_LP,
DMUB_PHY_FSM_P2_PLL_OFF_CPM,
DMUB_PHY_FSM_P2_PLL_OFF_PG,
@@ -2826,18 +2963,49 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+/**
+ * Definition of Replay Residency GPINT command.
+ * Bit[0] - Residency mode for Revision 0
+ * Bit[1] - Enable/Disable state
+ * Bit[2-3] - Revision number
+ * Bit[4-7] - Residency mode for Revision 1
+ * Bit[8] - Panel instance
+ * Bit[9-15] - Reserved
+ */
+
+enum pr_residency_mode {
+ PR_RESIDENCY_MODE_PHY = 0x0,
+ PR_RESIDENCY_MODE_ALPM,
+ PR_RESIDENCY_MODE_IPS2,
+ PR_RESIDENCY_MODE_FRAME_CNT,
+ PR_RESIDENCY_MODE_ENABLEMENT_PERIOD,
+};
+
#define REPLAY_RESIDENCY_MODE_SHIFT (0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT (1)
+#define REPLAY_RESIDENCY_REVISION_SHIFT (2)
+#define REPLAY_RESIDENCY_MODE2_SHIFT (4)
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_IPS 0x10
+# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+
+#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT)
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT)
+
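
A minimal sketch of composing a Revision 1 residency GPINT word from the
definitions above, selecting IPS2 residency and enabling collection; the
panel-instance shift (bit 8 per the layout comment) has no macro here, so the
literal shift is an assumption:

/* Sketch: build a Replay residency GPINT parameter
 * (Revision 1, mode2 = IPS, residency collection enabled). */
static inline uint32_t replay_residency_param_ips(uint8_t panel_inst)
{
	return REPLAY_RESIDENCY_REVISION_1 |
	       REPLAY_RESIDENCY_FIELD_MODE2_IPS |
	       REPLAY_RESIDENCY_ENABLE |
	       ((uint32_t)(panel_inst & 0x1) << 8); /* bit 8: panel instance */
}
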
+/**
+ * Definition of a replay_state.
+ */
enum replay_state {
REPLAY_STATE_0 = 0x0,
REPLAY_STATE_1 = 0x10,
@@ -2899,6 +3067,11 @@ enum dmub_cmd_replay_type {
* Set pseudo vtotal
*/
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
+ /**
+	 * Disable the adaptive sync SDP
+ */
+ DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
+
};
/**
@@ -3100,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal {
*/
uint8_t pad;
};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+	 * force_disabled: set to force disable the adaptive sync SDP
+ */
+ uint8_t force_disabled;
+
+ uint8_t pad[2];
+};
/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
@@ -3133,6 +3320,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+	 * 16-bit value dictated by the driver that carries the upper 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
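
With the new field the effective coasting vtotal becomes 32-bit, matching the
set_replay_coasting_vtotal() prototype change at the end of this diff. A
minimal sketch of the split, assuming the struct's pre-existing
coasting_vtotal field carries the low 16 bits:

/* Sketch: split a 32-bit coasting vtotal across the existing
 * 16-bit field (assumed low half) and coasting_vtotal_high. */
static void replay_set_coasting_vtotal_fields(
	struct dmub_cmd_replay_set_coasting_vtotal_data *data,
	uint32_t vtotal)
{
	data->coasting_vtotal = (uint16_t)(vtotal & 0xFFFF);
	data->coasting_vtotal_high = (uint16_t)(vtotal >> 16);
}
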
/**
@@ -3196,6 +3391,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+	 * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3250,6 +3459,11 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
*/
struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
+
};
/**
@@ -3332,7 +3546,7 @@ enum hw_lock_client {
/**
* Replay is the client of HW Lock Manager.
*/
- HW_LOCK_CLIENT_REPLAY = 4,
+ HW_LOCK_CLIENT_REPLAY = 4,
/**
* Invalid client.
*/
@@ -3925,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type {
* Queries backlight info for the embedded panel.
*/
DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1,
+ /**
+ * Sets the PWM Freq as per user's requirement.
+ */
+ DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2,
};
/**
@@ -4026,6 +4244,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt {
struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
};
+
+/**
+ * PHY test pattern selection, part of the SET_PHY_FSM payload.
+ */
+struct phy_test_mode {
+ uint8_t mode;
+ uint8_t pat0;
+ uint8_t pad[2];
+};
+
+/**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm_data {
+ uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t mode; /**< HDMI/DP/DP2 etc */
+ uint8_t lane_num; /**< Number of lanes */
+	uint32_t symclk_100Hz; /**< PLL symclk in units of 100 Hz */
+ struct phy_test_mode test_mode;
+ enum dmub_phy_fsm_state state;
+ uint32_t status;
+ uint8_t pad;
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */
+};
+
/**
* Maximum number of bytes a chunk sent to DMUB for parsing
*/
@@ -4148,6 +4394,65 @@ struct dmub_rb_cmd_secure_display {
};
/**
+ * Command type of a DMUB_CMD__PSP command
+ */
+enum dmub_cmd_psp_type {
+ DMUB_CMD__PSP_ASSR_ENABLE = 0
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_cmd_assr_enable_data {
+ /**
+ * ASSR enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * PHY port type.
+ * Indicates eDP / non-eDP port type
+ */
+ uint8_t phy_port_type;
+ /**
+ * PHY port ID.
+ */
+ uint8_t phy_port_id;
+ /**
+ * Link encoder index.
+ */
+ uint8_t link_enc_index;
+ /**
+ * HPO mode.
+ */
+ uint8_t hpo_mode;
+
+ /**
+ * Reserved field.
+ */
+ uint8_t reserved[7];
+};
+
+/**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_rb_cmd_assr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Assr data.
+ */
+ struct dmub_cmd_assr_enable_data assr_data;
+
+ /**
+ * Reserved field.
+ */
+ uint32_t reserved[3];
+};
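
A minimal sketch of building the new PSP command, assuming the usual DMUB
convention of filling the header type/sub-type/payload size before queuing;
the queuing path itself is outside this header and left to the caller:

/* Sketch: populate a DMUB_CMD__PSP_ASSR_ENABLE command.
 * Header field names follow dmub_cmd_header; memset() assumes
 * the usual kernel string helpers are available. */
static void fill_assr_enable(union dmub_rb_cmd *cmd,
			     bool enable, uint8_t link_enc_index)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->assr_enable.header.type = DMUB_CMD__PSP;
	cmd->assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
	cmd->assr_enable.header.payload_bytes =
		sizeof(struct dmub_cmd_assr_enable_data);
	cmd->assr_enable.assr_data.enable = enable;
	cmd->assr_enable.assr_data.link_enc_index = link_enc_index;
}
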
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -4338,6 +4643,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm;
+ /**
* Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/
struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
@@ -4405,6 +4714,15 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
*/
struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+ struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp;
+ /**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+ struct dmub_rb_cmd_assr_enable assr_enable;
+
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index 08aaf84affaf..50a98448e2e8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -25,6 +25,7 @@ DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
DMUB += dmub_dcn35.o
+DMUB += dmub_dcn351.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 98dad0d47e72..e500ca9ae09c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;
@@ -471,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 1df128e57ed3..de287b101848 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index 81dae75e9ff8..a4abe951c838 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
index 9a3afffd9b0f..066f35a50094 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
@@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 094e9f864557..662c34e9495c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@@ -465,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 4d520a893c7b..eccdab4986ce 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 2daa1e0c8061..e1da270502cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -32,8 +32,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#define DCN_BASE__INST0_SEG2 0x000034C0
-
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define CTX dmub
#define REGS dmub->regs_dcn32
@@ -218,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@@ -479,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index b0cd8d29402f..29c1132951af 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 6d1fbea0f6ba..70e63aeb8f89 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
+
+ offset = region6->offset;
+
+ REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
+ DMCUB_REGION6_TOP_ADDRESS,
+ region6->region.top - region6->region.base - 1,
+ DMCUB_REGION6_ENABLE, 1);
}
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
@@ -410,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds;
boot_options.bits.disable_clk_gate = params->disable_clock_gate;
boot_options.bits.ips_disable = params->disable_ips;
+ boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -506,6 +517,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
@@ -545,8 +557,14 @@ uint32_t dmub_dcn35_read_inbox0_ack_register(struct dmub_srv *dmub)
bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub)
{
union dmub_fw_boot_status status;
+ uint32_t is_enable;
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
+ if (is_enable == 0)
+ return false;
status.all = REG_READ(DMCUB_SCRATCH0);
- return status.bits.hw_power_init_done;
+ return (status.bits.dal_fw && status.bits.hw_power_init_done && status.bits.mailbox_rdy) ||
+ (!status.bits.dal_fw && status.bits.mailbox_rdy);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 129a7031d2ae..686e97c00ccc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -154,6 +157,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c
new file mode 100644
index 000000000000..8f40b9f6706c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn351.h"
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+#define CTX dmub
+#define REGS dmub->regs_dcn35
+#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
+{
+ struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35;
+#define REG_STRUCT regs
+
+#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
+ DMUB_DCN35_REGS()
+ DMCUB_INTERNAL_REGS()
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+#undef REG_STRUCT
+}
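
dmub_dcn351 reuses the DCN35 register struct and field lists wholesale; only
the offset bases come from the DCN 3.5.1 headers. For readers unfamiliar with
the DMUB_SR/DMUB_SF X-macro technique, a reduced standalone sketch of the same
pattern (all names here are illustrative, not DMUB's):

#include <stdint.h>

/* One register list, expanded differently at each use site. */
#define DEMO_REGS() \
	DEMO_SR(CTRL) \
	DEMO_SR(STATUS)

/* Per-ASIC absolute offsets consumed by the generator. */
#define regCTRL   0x10
#define regSTATUS 0x14

struct demo_offsets {
	uint32_t CTRL;
	uint32_t STATUS;
};

static void demo_regs_init(struct demo_offsets *offs)
{
/* Expands to: offs->CTRL = regCTRL; offs->STATUS = regSTATUS; */
#define DEMO_SR(reg_name) offs->reg_name = reg##reg_name;
	DEMO_REGS()
#undef DEMO_SR
}
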
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h
new file mode 100644
index 000000000000..4121fa1b301d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef _DMUB_DCN351_H_
+#define _DMUB_DCN351_H_
+
+#include "dmub_dcn35.h"
+
+struct dmub_srv;
+
+void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+
+#endif /* _DMUB_DCN351_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9ad738805320..90e878195d95 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -37,6 +37,7 @@
#include "dmub_dcn316.h"
#include "dmub_dcn32.h"
#include "dmub_dcn35.h"
+#include "dmub_dcn351.h"
#include "os_types.h"
/*
* Note: the DMUB service is standalone. No additional headers should be
@@ -78,6 +79,7 @@
#define DMUB_CW6_BASE (0x66000000)
#define DMUB_REGION5_BASE (0xA0000000)
+#define DMUB_REGION6_BASE (0xC0000000)
static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
@@ -314,6 +316,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
break;
case DMUB_ASIC_DCN35:
+ case DMUB_ASIC_DCN351:
dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
@@ -350,6 +353,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
+ if (asic == DMUB_ASIC_DCN351)
+ funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
funcs->should_detect = dmub_dcn35_should_detect;
@@ -417,58 +422,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub)
dmub_memset(dmub, 0, sizeof(*dmub));
}
+static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out,
+ const uint32_t *window_sizes,
+ enum dmub_window_memory_type memory_type)
+{
+ uint32_t i, top = 0;
+
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (params->window_memory_type[i] == memory_type) {
+ struct dmub_region *region = &out->regions[i];
+
+ region->base = dmub_align(top, 256);
+ region->top = region->base + dmub_align(window_sizes[i], 64);
+ top = region->top;
+ }
+ }
+
+ return dmub_align(top, 4096);
+}
+
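
The helper above packs every window of one memory type contiguously: region
bases are aligned to 256 bytes, sizes rounded up to the 64-byte DMCUB cache
line, and the running total to 4 KiB. Callers describe placement with a
per-window table; a minimal sketch of such a table, with an FB/GART split that
is illustrative rather than any ASIC's actual layout:

/* Sketch: a per-window placement table as consumed above.
 * This split is illustrative; real tables are per-ASIC. */
static const enum dmub_window_memory_type demo_window_placement[DMUB_WINDOW_TOTAL] = {
	[DMUB_WINDOW_0_INST_CONST]  = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_1_STACK]       = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_2_BSS_DATA]    = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_3_VBIOS]       = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_4_MAILBOX]     = DMUB_WINDOW_MEMORY_TYPE_GART,
	[DMUB_WINDOW_5_TRACEBUFF]   = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_6_FW_STATE]    = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_WINDOW_MEMORY_TYPE_FB,
	[DMUB_WINDOW_SHARED_STATE]  = DMUB_WINDOW_MEMORY_TYPE_GART,
};
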
enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
- const struct dmub_srv_region_params *params,
- struct dmub_srv_region_info *out)
+ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
{
- struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
- struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
- struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
- struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
- struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
- struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
- struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
- struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
- uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
- uint32_t previous_top = 0;
+ uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
memset(out, 0, sizeof(*out));
+ memset(window_sizes, 0, sizeof(window_sizes));
out->num_regions = DMUB_NUM_WINDOWS;
- inst->base = 0x0;
- inst->top = inst->base + params->inst_const_size;
-
- data->base = dmub_align(inst->top, 256);
- data->top = data->base + params->bss_data_size;
-
- /*
- * All cache windows below should be aligned to the size
- * of the DMCUB cache line, 64 bytes.
- */
-
- stack->base = dmub_align(data->top, 256);
- stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
-
- bios->base = dmub_align(stack->top, 256);
- bios->top = bios->base + params->vbios_size;
-
- if (params->is_mailbox_in_inbox) {
- mail->base = 0;
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = bios->top;
- } else {
- mail->base = dmub_align(bios->top, 256);
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = mail->top;
- }
-
fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
@@ -486,19 +477,21 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
- trace_buff->base = dmub_align(previous_top, 256);
- trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
- fw_state->base = dmub_align(trace_buff->top, 256);
- fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+ window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
+ window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+ window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
+ window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
+ window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
+ window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
+ window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
- scratch_mem->base = dmub_align(fw_state->top, 256);
- scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+ out->fb_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
- out->fb_size = dmub_align(scratch_mem->top, 4096);
-
- if (params->is_mailbox_in_inbox)
- out->inbox_size = dmub_align(mail->top, 4096);
+ out->gart_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
return DMUB_STATUS_OK;
}
@@ -507,8 +500,6 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
- uint8_t *cpu_base;
- uint64_t gpu_base;
uint32_t i;
if (!dmub->sw_init)
@@ -519,19 +510,16 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
- cpu_base = (uint8_t *)params->cpu_fb_addr;
- gpu_base = params->gpu_fb_addr;
-
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
&params->region_info->regions[i];
- out->fb[i].cpu_addr = cpu_base + reg->base;
- out->fb[i].gpu_addr = gpu_base + reg->base;
-
- if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
- out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
- out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+ if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
+ } else {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base;
@@ -583,9 +571,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
- struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
@@ -670,10 +659,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ region6.offset.quad_part = shared_state_fb->gpu_addr;
+ region6.region.base = DMUB_CW6_BASE;
+ region6.region.top = region6.region.base + shared_state_fb->size;
+
+ dmub->shared_state = shared_state_fb->cpu_addr;
+
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0)
dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
@@ -812,8 +807,10 @@ bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
if (!dmub->hw_funcs.is_hw_powered_up)
return true;
- return dmub->hw_funcs.is_hw_powered_up(dmub) &&
- dmub->hw_funcs.is_hw_init(dmub);
+	return dmub->hw_funcs.is_hw_powered_up(dmub);
}
enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 915a031a43cb..e4a26143f14c 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -27,11 +27,21 @@
#define __AUDIO_TYPES_H__
#include "signal_types.h"
+#include "fixed31_32.h"
+#include "dc_dp_types.h"
#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+struct audio_dp_link_info {
+ uint32_t link_bandwidth_kbps;
+ uint32_t hblank_min_symbol_width;
+ enum dp_link_encoding encoding;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lane_count;
+ bool is_mst;
+};
struct audio_crtc_info {
uint32_t h_total;
@@ -42,7 +52,10 @@ struct audio_crtc_info {
uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */
uint32_t refresh_rate;
enum dc_color_depth color_depth;
+ enum dc_pixel_encoding pixel_encoding;
bool interlaced;
+ uint32_t dsc_bits_per_pixel;
+ uint32_t dsc_num_slices;
};
struct azalia_clock_info {
uint32_t pixel_clock_in_10khz;
@@ -95,6 +108,8 @@ struct audio_output {
enum signal_type signal;
/* video timing */
struct audio_crtc_info crtc_info;
+ /* DP link info */
+ struct audio_dp_link_info dp_link_info;
/* PLL for audio */
struct audio_pll_info pll_info;
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index e317089cf6ee..c9ec46c6b4c6 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -250,11 +250,13 @@ enum {
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
#define GC_11_0_3_A0 0x20
+#define GC_11_0_4_A0 0xC0
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 1c6f24cb1d2f..447768dec887 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -27,7 +27,6 @@
#define __DAL_TYPES_H__
#include "signal_types.h"
-#include "dc_types.h"
struct dal_logger;
struct dc_bios;
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c6bbd262f1ac..08ee0350b31f 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -226,8 +226,8 @@ enum dp_alt_mode {
struct graphics_object_id {
uint32_t id:8;
- uint32_t enum_id:4;
- uint32_t type:4;
+ enum object_enum_id enum_id;
+ enum object_type type;
uint32_t reserved:16; /* for padding. total size should be u32 */
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1b8ab20f1715..1867aac57cf2 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -73,7 +73,6 @@ struct link_training_settings {
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
bool should_set_fec_ready;
- /* TODO - factor lane_settings out because it changes during LT */
union dc_dp_ffe_preset *ffe_preset;
uint16_t cr_pattern_time;
@@ -169,6 +168,15 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
+#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\
+ (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+
+#define IS_DP_PHY_PATTERN(test_pattern)\
+ ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+
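
A minimal usage sketch of the new classification macros; the helper itself is
illustrative:

/* Sketch: classify a test pattern with the macros above. */
static const char *dp_test_pattern_class(enum dp_test_pattern tp)
{
	if (IS_DP_PHY_SQUARE_PATTERN(tp))
		return "square";
	if (IS_DP_PHY_PATTERN(tp))
		return "phy";
	return "other";
}
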
enum dp_test_pattern_color_space {
DP_TEST_PATTERN_COLOR_SPACE_RGB,
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f39e2785e618..83479951732a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -64,6 +64,7 @@
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
+#define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__)
struct dc_log_buffer_ctx {
char *buf;
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index 1b14b17a79c7..a10d6b988aab 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+static inline bool dc_is_tmds_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 8b5c27857671..3699e633801d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 min_display;
struct fixed31_32 max_content;
struct fixed31_32 clip = dc_fixpt_one;
- struct fixed31_32 output;
+ struct fixed31_32 output = dc_fixpt_zero;
bool use_eetf = false;
bool is_clipped = false;
struct fixed31_32 sdr_white_level;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 3955b7e4b2e2..d09627c15b9c 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -158,13 +158,13 @@ static unsigned int calc_v_total_from_duration(
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
- if (dc_is_hdmi_signal(stream->signal)) {
+ if (dc_is_hdmi_signal(stream->signal)) { // change for HDMI to comply with spec
uint32_t h_total_up_scaled;
h_total_up_scaled = stream->timing.h_total * 10000;
v_total = div_u64((unsigned long long)duration_in_us
* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
- h_total_up_scaled);
+				h_total_up_scaled); // ceiling for MMax and MMin for MVRR
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
@@ -1057,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->fixed_refresh_in_uhz = 0;
refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
-+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
in_out_vrr->supported = true;
}
@@ -1126,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
}
+
+	in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED);
}
void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 733f22bed021..c996365e84b0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -151,7 +151,7 @@ out:
static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE;
uint8_t size;
uint16_t max_wait = 20; // units of ms
uint16_t num_polls = 5;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index f7b5583ee609..8e9caae7c955 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID)
+		return MOD_HDCP_STATUS_DDC_FAILURE;
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID)
+		return MOD_HDCP_STATUS_DDC_FAILURE;
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..7c9805705fd3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 5960dd760e91..8ce6c22e5d04 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
unsigned int length);
void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 738ee763f24a..a344e2e49b0e 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_feature_enabled) {
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
- }
-
- if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
@@ -539,8 +536,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
break;
case FREESYNC_TYPE_PCON_IN_WHITELIST:
- mod_build_adaptive_sync_infopacket_v1(info_packet);
- break;
case ADAPTIVE_SYNC_TYPE_EDP:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e304e8435fb8..2a3698fd2dc2 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal)
+ uint32_t vtotal)
{
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index bef4815e1703..ff7e6f3cd6be 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal);
+ uint32_t vtotal);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);